[PATCH] add AOP_TRUNCATED_PAGE, prepend AOP_ to WRITEPAGE_ACTIVATE

readpage(), prepare_write(), and commit_write() callers are updated to
understand the special return code AOP_TRUNCATED_PAGE in the style of
writepage() and WRITEPAGE_ACTIVATE.  AOP_TRUNCATED_PAGE tells the caller that
the callee has unlocked the page and that the operation should be tried again
with a new page.  OCFS2 uses this to detect and work around a lock inversion in
its aop methods.  There should be no change in behaviour for methods that don't
return AOP_TRUNCATED_PAGE.
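
For illustration only (not part of this patch), the callee side of the
protocol looks roughly like the sketch below.  The sample_* helpers and the
lock-ordering story are hypothetical stand-ins for what a filesystem such as
OCFS2 does with its cluster locks:

	static int sample_readpage(struct file *file, struct page *page)
	{
		struct inode *inode = page->mapping->host;
		int ret;

		/*
		 * Hypothetical ordering: the fs lock must be taken before the
		 * page lock, but ->readpage() is entered with the page already
		 * locked.  Try the fs lock without blocking; if that fails,
		 * unlock the page and return AOP_TRUNCATED_PAGE so the caller
		 * retries with a freshly looked-up page.  (A real
		 * implementation would then take and drop the fs lock in a
		 * blocking fashion so the retry can make progress.)
		 */
		if (!sample_try_fs_lock(inode)) {
			unlock_page(page);
			return AOP_TRUNCATED_PAGE;
		}

		ret = sample_submit_read(inode, page); /* unlocks page when done */
		sample_fs_unlock(inode);
		return ret;
	}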

WRITEPAGE_ACTIVATE is also prefixed with AOP_ for consistency, and both return
codes are made enum values so that kerneldoc can be used to document their
semantics.
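
For context, the enum ends up in include/linux/fs.h along these lines (a
sketch of the fs.h hunk, which is not reproduced below; the exact enum name,
values, and kerneldoc text may differ):

	/*
	 * Fs-private, non-negative values that address_space_operations
	 * methods may return to generic code, grouped in an enum so
	 * kerneldoc can describe each one.
	 */
	enum positive_aop_returns {
		/* writepage: keep the page active and retry writeback later */
		AOP_WRITEPAGE_ACTIVATE	= 0x80000,
		/*
		 * readpage/prepare_write/commit_write: the method unlocked
		 * the page; the caller should drop its reference and retry
		 * the operation with a freshly looked-up page.
		 */
		AOP_TRUNCATED_PAGE	= 0x80001,
	};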

Signed-off-by: Zach Brown <zach.brown@oracle.com>
commit 994fc28c7b (parent 7063fbf226)
Author:    Zach Brown
Date:      2005-12-15 14:28:17 -08:00
Committer: Joel Becker
9 changed files with 113 additions and 45 deletions

--- a/mm/filemap.c
+++ b/mm/filemap.c

@@ -831,8 +831,13 @@ readpage:
 		/* Start the actual read. The read will unlock the page. */
 		error = mapping->a_ops->readpage(filp, page);
 
-		if (unlikely(error))
+		if (unlikely(error)) {
+			if (error == AOP_TRUNCATED_PAGE) {
+				page_cache_release(page);
+				goto find_page;
+			}
 			goto readpage_error;
+		}
 
 		if (!PageUptodate(page)) {
 			lock_page(page);
@@ -1152,26 +1157,24 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
-	int error;
+	int ret;
 
-	page = page_cache_alloc_cold(mapping);
-	if (!page)
-		return -ENOMEM;
+	do {
+		page = page_cache_alloc_cold(mapping);
+		if (!page)
+			return -ENOMEM;
+
+		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
+		if (ret == 0)
+			ret = mapping->a_ops->readpage(file, page);
+		else if (ret == -EEXIST)
+			ret = 0; /* losing race to add is OK */
 
-	error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
-	if (!error) {
-		error = mapping->a_ops->readpage(file, page);
-		page_cache_release(page);
-		return error;
-	}
-
-	/*
-	 * We arrive here in the unlikely event that someone
-	 * raced with us and added our page to the cache first
-	 * or we are out of memory for radix-tree nodes.
-	 */
-	page_cache_release(page);
-	return error == -EEXIST ? 0 : error;
+		page_cache_release(page);
+
+	} while (ret == AOP_TRUNCATED_PAGE);
+
+	return ret;
 }
 
 #define MMAP_LOTSAMISS (100)
@@ -1331,10 +1334,14 @@ page_not_uptodate:
 		goto success;
 	}
 
-	if (!mapping->a_ops->readpage(file, page)) {
+	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
 		wait_on_page_locked(page);
 		if (PageUptodate(page))
 			goto success;
+	} else if (error == AOP_TRUNCATED_PAGE) {
+		page_cache_release(page);
+		goto retry_find;
 	}
 
 	/*
@@ -1358,10 +1365,14 @@ page_not_uptodate:
 		goto success;
 	}
 	ClearPageError(page);
-	if (!mapping->a_ops->readpage(file, page)) {
+	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
 		wait_on_page_locked(page);
 		if (PageUptodate(page))
 			goto success;
+	} else if (error == AOP_TRUNCATED_PAGE) {
+		page_cache_release(page);
+		goto retry_find;
 	}
 
 	/*
@@ -1444,10 +1455,14 @@ page_not_uptodate:
 		goto success;
 	}
 
-	if (!mapping->a_ops->readpage(file, page)) {
+	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
 		wait_on_page_locked(page);
 		if (PageUptodate(page))
 			goto success;
+	} else if (error == AOP_TRUNCATED_PAGE) {
+		page_cache_release(page);
+		goto retry_find;
 	}
 
 	/*
@@ -1470,10 +1485,14 @@
 	}
 
 	ClearPageError(page);
-	if (!mapping->a_ops->readpage(file, page)) {
+	error = mapping->a_ops->readpage(file, page);
+	if (!error) {
 		wait_on_page_locked(page);
 		if (PageUptodate(page))
 			goto success;
+	} else if (error == AOP_TRUNCATED_PAGE) {
+		page_cache_release(page);
+		goto retry_find;
 	}
 
 	/*
@@ -1934,12 +1953,16 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 		status = a_ops->prepare_write(file, page, offset, offset+bytes);
 		if (unlikely(status)) {
 			loff_t isize = i_size_read(inode);
+
+			if (status != AOP_TRUNCATED_PAGE)
+				unlock_page(page);
+			page_cache_release(page);
+			if (status == AOP_TRUNCATED_PAGE)
+				continue;
 			/*
 			 * prepare_write() may have instantiated a few blocks
 			 * outside i_size. Trim these off again.
 			 */
-			unlock_page(page);
-			page_cache_release(page);
 			if (pos + bytes > isize)
 				vmtruncate(inode, isize);
 			break;
@@ -1952,6 +1975,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 						cur_iov, iov_base, bytes);
 		flush_dcache_page(page);
 		status = a_ops->commit_write(file, page, offset, offset+bytes);
+		if (status == AOP_TRUNCATED_PAGE) {
+			page_cache_release(page);
+			continue;
+		}
 		if (likely(copied > 0)) {
 			if (!status)
 				status = copied;