readahead: combine file_ra_state.prev_index/prev_offset into prev_pos

Combine the file_ra_state members
				unsigned long prev_index
				unsigned int prev_offset
into
				loff_t prev_pos

It is more consistent and better supports huge files.
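
The packing itself is just pos = (index << PAGE_CACHE_SHIFT) | offset,
undone with the matching shift and mask.  A standalone sketch of the
idea (not part of the patch: the 4 KiB page-size constant, the
fixed-width stand-in types and the helper names are all made up; only
the shift-and-mask arithmetic mirrors the patched code):

	#include <stdint.h>

	#define PAGE_CACHE_SHIFT 12			/* assume 4 KiB pages */
	#define PAGE_CACHE_SIZE  (1ULL << PAGE_CACHE_SHIFT)

	/* page index + offset within the page -> one 64-bit file position */
	static int64_t pack_prev_pos(uint64_t index, unsigned int offset)
	{
		int64_t pos = index;		/* widen before shifting */

		pos <<= PAGE_CACHE_SHIFT;
		pos |= offset & (PAGE_CACHE_SIZE - 1);
		return pos;
	}

	/* the reverse, as the patched do_generic_mapping_read() does */
	static uint64_t prev_pos_index(int64_t pos)
	{
		return pos >> PAGE_CACHE_SHIFT;
	}

	static unsigned int prev_pos_offset(int64_t pos)
	{
		return pos & (PAGE_CACHE_SIZE - 1);
	}

For any in-range pair, prev_pos_index(pack_prev_pos(i, off)) == i and
prev_pos_offset(pack_prev_pos(i, off)) == off.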

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author:    Fengguang Wu
Date:      2007-10-16 01:24:33 -07:00
Committer: Linus Torvalds
Commit:    f4e6b498d6
Parent:    0bb7ba6b9c

6 changed files with 19 additions and 18 deletions

--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_index;
-	prev_offset = ra.prev_offset;
+	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -966,7 +966,6 @@ page_ok:
 	index += offset >> PAGE_CACHE_SHIFT;
 	offset &= ~PAGE_CACHE_MASK;
 	prev_offset = offset;
-	ra.prev_offset = offset;
 
 	page_cache_release(page);
 	if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
 
 out:
 	*_ra = ra;
-	_ra->prev_index = prev_index;
+	_ra->prev_pos = prev_index;
+	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	_ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
 		page_cache_release(cached_page);
 	if (filp)
@@ -1396,7 +1397,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	ra->prev_index = page->index;
+	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
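
The "[akpm@linux-foundation.org: fix shift overflow]" note is most
likely about the widening order: prev_index is an unsigned long page
index, so shifting it by PAGE_CACHE_SHIFT before widening would drop
the high bits on 32-bit kernels.  That is presumably why the out: path
above assigns the index to the 64-bit prev_pos first and only then
shifts, and why filemap_fault() casts page->index to loff_t before
shifting.  A standalone illustration with made-up values, where
uint32_t stands in for a 32-bit unsigned long (not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_CACHE_SHIFT 12		/* assume 4 KiB pages */

	int main(void)
	{
		uint32_t prev_index  = 0x200000;	/* page index at the 8 GiB mark */
		uint32_t prev_offset = 0x123;
		int64_t wrong, right;

		/* shifting the narrow type first wraps, losing the high bits */
		wrong = (prev_index << PAGE_CACHE_SHIFT) | prev_offset;

		/* widen to 64 bits first, as the patch does */
		right = prev_index;
		right <<= PAGE_CACHE_SHIFT;
		right |= prev_offset;

		printf("narrow shift: 0x%llx\nwide shift:   0x%llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}

On a typical host the first form prints 0x123, the second the intended
0x200000123.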