readahead: combine file_ra_state.prev_index/prev_offset into prev_pos
Combine the file_ra_state members

	unsigned long prev_index
	unsigned int prev_offset

into

	loff_t prev_pos

It is more consistent and better supports huge files.

Thanks to Peter for the nice proposal!

[akpm@linux-foundation.org: fix shift overflow]
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: 0bb7ba6b9c
Commit: f4e6b498d6
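Before the per-file hunks, it may help to see the packing convention the whole patch revolves around: the high bits of prev_pos carry the old prev_index page number, and the low PAGE_CACHE_SHIFT bits carry the old prev_offset. Below is a minimal userspace sketch of that pack/unpack arithmetic; the PAGE_SHIFT constant and the loff_t stand-in typedef are illustrative assumptions, not kernel headers.

#include <stdio.h>

typedef long long loff_t;	/* userspace stand-in for the kernel type */
#define PAGE_SHIFT 12		/* 4 KiB pages; the kernel uses PAGE_CACHE_SHIFT */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long prev_index = 0x12345;	/* old: page index of last read */
	unsigned int prev_offset = 0x67;	/* old: offset within that page */

	/* pack: note the widening (loff_t) cast before the shift, which is
	 * the "shift overflow" fix mentioned in the changelog */
	loff_t prev_pos = ((loff_t)prev_index << PAGE_SHIFT) | prev_offset;

	/* unpack, as do_generic_mapping_read() now does */
	unsigned long index  = prev_pos >> PAGE_SHIFT;
	unsigned int  offset = prev_pos & (PAGE_SIZE - 1);

	printf("pos=%#llx index=%#lx offset=%#x\n", prev_pos, index, offset);
	return 0;
}

The cast before the shift matters: shifting a 32-bit index first and widening afterwards would lose the high bits on huge files.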
fs/ext3/dir.c
@@ -143,7 +143,7 @@ static int ext3_readdir(struct file * filp,
 					sb->s_bdev->bd_inode->i_mapping,
 					&filp->f_ra, filp,
 					index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext3_bread(NULL, inode, blk, 0, &err);
 		}
 
fs/ext4/dir.c
@@ -142,7 +142,7 @@ static int ext4_readdir(struct file * filp,
 					sb->s_bdev->bd_inode->i_mapping,
 					&filp->f_ra, filp,
 					index, 1);
-			filp->f_ra.prev_index = index;
+			filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 			bh = ext4_bread(NULL, inode, blk, 0, &err);
 		}
 
fs/splice.c
@@ -447,7 +447,7 @@ fill_it:
 	 */
 	while (page_nr < nr_pages)
 		page_cache_release(pages[page_nr++]);
-	in->f_ra.prev_index = index;
+	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
 	if (spd.nr_pages)
 		return splice_to_pipe(pipe, &spd);
include/linux/fs.h
@@ -704,8 +704,7 @@ struct file_ra_state {
 
 	unsigned int ra_pages;		/* Maximum readahead window */
 	int mmap_miss;			/* Cache miss stat for mmap accesses */
-	unsigned long prev_index;	/* Cache last read() position */
-	unsigned int prev_offset;	/* Offset where last read() ended in a page */
+	loff_t prev_pos;		/* Cache last read() position */
 };
 
 /*
mm/filemap.c
@@ -879,8 +879,8 @@ void do_generic_mapping_read(struct address_space *mapping,
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_index;
-	prev_offset = ra.prev_offset;
+	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -966,7 +966,6 @@ page_ok:
 		index += offset >> PAGE_CACHE_SHIFT;
 		offset &= ~PAGE_CACHE_MASK;
 		prev_offset = offset;
-		ra.prev_offset = offset;
 
 		page_cache_release(page);
 		if (ret == nr && desc->count)
@@ -1056,9 +1055,11 @@ no_cached_page:
 
 out:
 	*_ra = ra;
-	_ra->prev_index = prev_index;
+	_ra->prev_pos = prev_index;
+	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	_ra->prev_pos |= prev_offset;
 
-	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
 		page_cache_release(cached_page);
 	if (filp)
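A note on the three-step form above (assign, <<=, |=): it first moves prev_index into the 64-bit prev_pos and only then shifts, so the shift happens in loff_t arithmetic rather than in unsigned long. A hedged userspace sketch of the hazard this avoids, emulating a 32-bit unsigned long with unsigned int:

#include <stdio.h>

typedef long long loff_t;
#define PAGE_SHIFT 12

int main(void)
{
	/* emulate a 32-bit unsigned long, as on i386 */
	unsigned int prev_index = 0x00300000;	/* page index of a ~12 GiB offset */

	/* wrong: the shift is done in 32-bit arithmetic and overflows
	 * before the widening to loff_t */
	loff_t bad = (loff_t)(prev_index << PAGE_SHIFT);

	/* right: widen first, then shift -- what the assign-then-<<=
	 * sequence in the hunk above achieves */
	loff_t good = (loff_t)prev_index << PAGE_SHIFT;

	printf("bad=%#llx good=%#llx\n", bad, good);	/* bad=0, good=0x300000000 */
	return 0;
}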
@@ -1396,7 +1397,7 @@ retry_find:
 	 * Found the page and have a reference on it.
 	 */
 	mark_page_accessed(page);
-	ra->prev_index = page->index;
+	ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
 	vmf->page = page;
 	return ret | VM_FAULT_LOCKED;
 
mm/readahead.c
@@ -46,7 +46,7 @@ void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
-	ra->prev_index = -1;
+	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
@@ -327,7 +327,7 @@ static unsigned long get_next_ra_size(struct file_ra_state *ra,
  * indicator. The flag won't be set on already cached pages, to avoid the
  * readahead-for-nothing fuss, saving pointless page cache lookups.
  *
- * prev_index tracks the last visited page in the _previous_ read request.
+ * prev_pos tracks the last visited byte in the _previous_ read request.
  * It should be maintained by the caller, and will be used for detecting
  * small random reads. Note that the readahead algorithm checks loosely
  * for sequential patterns. Hence interleaved reads might be served as
@@ -351,12 +351,10 @@ ondemand_readahead(struct address_space *mapping,
 		   bool hit_readahead_marker, pgoff_t offset,
 		   unsigned long req_size)
 {
-	int max;		/* max readahead pages */
+	int max = ra->ra_pages;	/* max readahead pages */
+	pgoff_t prev_offset;
 	int sequential;
 
-	max = ra->ra_pages;
-	sequential = (offset - ra->prev_index <= 1UL) || (req_size > max);
-
 	/*
 	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
@@ -369,6 +367,9 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
+	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	sequential = offset - prev_offset <= 1UL || req_size > max;
+
 	/*
 	 * Standalone, small read.
 	 * Read as is, and do not pollute the readahead state.
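The two lines added above recover the previous page number from prev_pos and redo the sequentiality test with it. As a rough standalone illustration (the sample values and PAGE_SHIFT are assumptions, not from the patch): a request counts as sequential when it starts on the previous page or the page right after it, or when it is bigger than the readahead window anyway.

#include <stdio.h>

typedef long long loff_t;
typedef unsigned long pgoff_t;
#define PAGE_SHIFT 12

/* Sketch of the sequentiality test added to ondemand_readahead() */
static int is_sequential(loff_t prev_pos, pgoff_t offset,
			 unsigned long req_size, unsigned long max)
{
	pgoff_t prev_offset = prev_pos >> PAGE_SHIFT;

	return offset - prev_offset <= 1UL || req_size > max;
}

int main(void)
{
	/* previous read ended somewhere inside page 100 */
	loff_t prev_pos = ((loff_t)100 << PAGE_SHIFT) | 123;

	printf("%d\n", is_sequential(prev_pos, 101, 1, 32));	/* 1: next page   */
	printf("%d\n", is_sequential(prev_pos, 100, 1, 32));	/* 1: same page   */
	printf("%d\n", is_sequential(prev_pos, 500, 1, 32));	/* 0: random jump */
	printf("%d\n", is_sequential(prev_pos, 500, 64, 32));	/* 1: big request */
	return 0;
}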