Btrfs: cache the extent state everywhere we possibly can V2
This patch just goes through and fixes everybody that does

    lock_extent()
    blah
    unlock_extent()

to use

    lock_extent_bits()
    blah
    unlock_extent_cached()

and passes around an extent_state so we only have to do the searches once per function. This gives me about a 3 mb/s boost on my random write test. I have not converted some things, like the relocation and ioctl code, since they aren't heavily used and the relocation code is in the middle of being rewritten. I also changed clear_extent_bit() to only unset the cached state if we are clearing EXTENT_LOCKED and related bits, so we can do things like

    lock_extent_bits()
    clear delalloc bits
    unlock_extent_cached()

without losing our cached state. I tested this thoroughly and turned on LEAK_DEBUG to make sure we weren't leaking extent states; everything worked out fine.

Signed-off-by: Josef Bacik <josef@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
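As an illustration of the conversion described above, here is a minimal sketch of the before/after calling pattern, modeled on the calls that appear in the diff below; the caller's inode, start, and end are stand-ins and not part of the patch:

    struct extent_state *cached_state = NULL;

    /* Before: each helper re-searches the io_tree for the state covering [start, end]. */
    lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
    /* ... clear delalloc bits, look up extents, etc. ... */
    unlock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);

    /*
     * After: lock_extent_bits() fills cached_state on the first search, and
     * unlock_extent_cached() (plus any helper handed &cached_state in between)
     * reuses it instead of walking the tree again.
     */
    lock_extent_bits(&BTRFS_I(inode)->io_tree, start, end, 0,
                     &cached_state, GFP_NOFS);
    /* ... clear delalloc bits, look up extents, etc. ... */
    unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, end,
                         &cached_state, GFP_NOFS);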
@@ -513,7 +513,10 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_end;
 	int err;
 	int set = 0;
+	int clear = 0;
 
+	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
+		clear = 1;
 again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
 		prealloc = alloc_extent_state(mask);
@@ -524,14 +527,20 @@ again:
 	spin_lock(&tree->lock);
 	if (cached_state) {
 		cached = *cached_state;
-		*cached_state = NULL;
-		cached_state = NULL;
+
+		if (clear) {
+			*cached_state = NULL;
+			cached_state = NULL;
+		}
+
 		if (cached && cached->tree && cached->start == start) {
-			atomic_dec(&cached->refs);
+			if (clear)
+				atomic_dec(&cached->refs);
 			state = cached;
 			goto hit_next;
 		}
-		free_extent_state(cached);
+		if (clear)
+			free_extent_state(cached);
 	}
 	/*
 	 * this search will find the extents that end after
@@ -946,11 +955,11 @@ int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask)
+			struct extent_state **cached_state, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
-			      0, NULL, NULL, mask);
+			      0, NULL, cached_state, mask);
 }
 
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -984,10 +993,11 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
-				 u64 end, gfp_t mask)
+				 u64 end, struct extent_state **cached_state,
+				 gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
-				NULL, mask);
+				cached_state, mask);
 }
 
 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1727,7 +1737,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 	}
 
 	if (!uptodate) {
-		clear_extent_uptodate(tree, start, end, GFP_NOFS);
+		clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
 		ClearPageUptodate(page);
 		SetPageError(page);
 	}
@@ -2710,6 +2720,7 @@ int extent_readpages(struct extent_io_tree *tree,
 int extent_invalidatepage(struct extent_io_tree *tree,
 			  struct page *page, unsigned long offset)
 {
+	struct extent_state *cached_state = NULL;
 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
@@ -2718,12 +2729,12 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 	if (start > end)
 		return 0;
 
-	lock_extent(tree, start, end, GFP_NOFS);
+	lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);
 	wait_on_page_writeback(page);
 	clear_extent_bit(tree, start, end,
 			 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
 			 EXTENT_DO_ACCOUNTING,
-			 1, 1, NULL, GFP_NOFS);
+			 1, 1, &cached_state, GFP_NOFS);
 	return 0;
 }
 
@@ -2926,16 +2937,17 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 			get_extent_t *get_extent)
 {
 	struct inode *inode = mapping->host;
+	struct extent_state *cached_state = NULL;
 	u64 start = iblock << inode->i_blkbits;
 	sector_t sector = 0;
 	size_t blksize = (1 << inode->i_blkbits);
 	struct extent_map *em;
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		    GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
+			 0, &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
+			     start + blksize - 1, &cached_state, GFP_NOFS);
 	if (!em || IS_ERR(em))
 		return 0;
 
@@ -2957,6 +2969,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	u32 flags = 0;
 	u64 disko = 0;
 	struct extent_map *em = NULL;
+	struct extent_state *cached_state = NULL;
 	int end = 0;
 	u64 em_start = 0, em_len = 0;
 	unsigned long emflags;
@@ -2965,8 +2978,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	if (len == 0)
 		return -EINVAL;
 
-	lock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		GFP_NOFS);
+	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
+			 &cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, off, max - off, 0);
 	if (!em)
 		goto out;
@@ -3029,8 +3042,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 out_free:
 	free_extent_map(em);
 out:
-	unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len,
-		      GFP_NOFS);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len,
+			     &cached_state, GFP_NOFS);
 	return ret;
 }
 
@@ -3270,7 +3283,8 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 }
 
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-				 struct extent_buffer *eb)
+				 struct extent_buffer *eb,
+				 struct extent_state **cached_state)
 {
 	unsigned long i;
 	struct page *page;
@@ -3280,7 +3294,7 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
 	clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			      GFP_NOFS);
+			      cached_state, GFP_NOFS);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if (page)
@@ -3340,7 +3354,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 }
 
 int extent_buffer_uptodate(struct extent_io_tree *tree,
-			   struct extent_buffer *eb)
+			   struct extent_buffer *eb,
+			   struct extent_state *cached_state)
 {
 	int ret = 0;
 	unsigned long num_pages;
@@ -3352,7 +3367,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
 		return 1;
 
 	ret = test_range_bit(tree, eb->start, eb->start + eb->len - 1,
-			     EXTENT_UPTODATE, 1, NULL);
+			     EXTENT_UPTODATE, 1, cached_state);
 	if (ret)
 		return ret;