Btrfs: cleanup destroy_marked_extents
We can just look up the extent_buffers for the range and free them that way. This makes the cleanup a bit cleaner, and we can make sure the extent_buffers are evicted quickly by marking them as stale. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
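The heart of the change is the inner cleanup loop: instead of walking the btree inode's page cache and clearing dirty state page by page, it looks up the extent_buffer covering each range, waits for writeback, clears the dirty bit, and frees the buffer as stale. A minimal sketch of that loop, with the declarations and the surrounding find_first_extent_bit() iteration omitted, reads roughly as follows:

	/* Sketch of the new inner loop from the diff below. */
	while (start <= end) {
		/* Look up the extent_buffer that covers this offset. */
		eb = btrfs_find_tree_block(root, start, root->leafsize);
		start += eb->len;
		if (!eb)
			continue;

		/* Wait for any in-flight writeback, clear the dirty bit,
		 * and mark the buffer stale so it is evicted quickly. */
		wait_on_extent_buffer_writeback(eb);
		if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
			clear_extent_buffer_dirty(eb);
		free_extent_buffer_stale(eb);
	}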
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3752,13 +3752,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 					int mark)
 {
 	int ret;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
 	struct extent_buffer *eb;
 	u64 start = 0;
 	u64 end;
-	u64 offset;
-	unsigned long index;
 
 	while (1) {
 		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
@@ -3768,35 +3764,17 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 
 		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
 		while (start <= end) {
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
+			eb = btrfs_find_tree_block(root, start,
+						   root->leafsize);
+			start += eb->len;
+			if (!eb)
 				continue;
-			offset = page_offset(page);
+			wait_on_extent_buffer_writeback(eb);
 
-			spin_lock(&dirty_pages->buffer_lock);
-			eb = radix_tree_lookup(
-			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
-					       offset >> PAGE_CACHE_SHIFT);
-			spin_unlock(&dirty_pages->buffer_lock);
-			if (eb)
-				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
-							 &eb->bflags);
-			lock_page(page);
-
-			wait_on_page_writeback(page);
-			if (PageDirty(page)) {
-				clear_page_dirty_for_io(page);
-				spin_lock_irq(&page->mapping->tree_lock);
-				radix_tree_tag_clear(&page->mapping->page_tree,
-							page_index(page),
-							PAGECACHE_TAG_DIRTY);
-				spin_unlock_irq(&page->mapping->tree_lock);
-			}
-
-			unlock_page(page);
-			page_cache_release(page);
+			if (test_and_clear_bit(EXTENT_BUFFER_DIRTY,
+					       &eb->bflags))
+				clear_extent_buffer_dirty(eb);
+			free_extent_buffer_stale(eb);
 		}
 	}
 
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3162,7 +3162,7 @@ static int eb_wait(void *word)
 	return 0;
 }
 
-static void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
 {
 	wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
 		    TASK_UNINTERRUPTIBLE);
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -282,6 +282,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
+void wait_on_extent_buffer_writeback(struct extent_buffer *eb);
 
 static inline unsigned long num_extent_pages(u64 start, u64 len)
 {