Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable: (33 commits)
  Btrfs: Fix page count calculation
  btrfs: Drop __exit attribute on btrfs_exit_compress
  btrfs: cleanup error handling in btrfs_unlink_inode()
  Btrfs: exclude super blocks when we read in block groups
  Btrfs: make sure search_bitmap finds something in remove_from_bitmap
  btrfs: fix return value check of btrfs_start_transaction()
  btrfs: checking NULL or not in some functions
  Btrfs: avoid uninit variable warnings in ordered-data.c
  Btrfs: catch errors from btrfs_sync_log
  Btrfs: make shrink_delalloc a little friendlier
  Btrfs: handle no memory properly in prepare_pages
  Btrfs: do error checking in btrfs_del_csums
  Btrfs: use the global block reserve if we cannot reserve space
  Btrfs: do not release more reserved bytes to the global_block_rsv than we need
  Btrfs: fix check_path_shared so it returns the right value
  btrfs: check return value of btrfs_start_ioctl_transaction() properly
  btrfs: fix return value check of btrfs_join_transaction()
  fs/btrfs/inode.c: Add missing IS_ERR test
  btrfs: fix missing break in switch phrase
  btrfs: fix several uncheck memory allocations
  ...
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
         if (!path)
                 return -ENOMEM;
 
-        exclude_super_stripes(extent_root, block_group);
-        spin_lock(&block_group->space_info->lock);
-        block_group->space_info->bytes_readonly += block_group->bytes_super;
-        spin_unlock(&block_group->space_info->lock);
-
         last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
         /*
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                         cache->cached = BTRFS_CACHE_NO;
                 }
                 spin_unlock(&cache->lock);
-                if (ret == 1)
+                if (ret == 1) {
+                        free_excluded_extents(fs_info->extent_root, cache);
                         return 0;
+                }
         }
 
         if (load_cache_only)
@@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
         u64 reserved;
         u64 max_reclaim;
         u64 reclaimed = 0;
+        long time_left;
         int pause = 1;
         int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+        int loops = 0;
 
         block_rsv = &root->fs_info->delalloc_block_rsv;
         space_info = block_rsv->space_info;
@@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 
         max_reclaim = min(reserved, to_reclaim);
 
-        while (1) {
+        while (loops < 1024) {
                 /* have the flusher threads jump in and do some IO */
                 smp_mb();
                 nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 
                 spin_lock(&space_info->lock);
-                if (reserved > space_info->bytes_reserved)
+                if (reserved > space_info->bytes_reserved) {
+                        loops = 0;
                         reclaimed += reserved - space_info->bytes_reserved;
+                } else {
+                        loops++;
+                }
                 reserved = space_info->bytes_reserved;
                 spin_unlock(&space_info->lock);
 
@@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
                         return -EAGAIN;
 
                 __set_current_state(TASK_INTERRUPTIBLE);
-                schedule_timeout(pause);
+                time_left = schedule_timeout(pause);
+
+                /* We were interrupted, exit */
+                if (time_left)
+                        break;
+
                 pause <<= 1;
                 if (pause > HZ / 10)
                         pause = HZ / 10;
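The two shrink_delalloc hunks above bound the flush loop at 1024 fruitless passes and back off between passes, resetting the counter whenever some reservation is actually reclaimed. What follows is a minimal userspace sketch of that retry-with-backoff pattern, not btrfs code: try_reclaim() is an invented stand-in for kicking writeback, and nanosleep() with a doubling, capped pause stands in for schedule_timeout() and HZ / 10.

#include <stdio.h>
#include <time.h>

/* Made-up stand-in for kicking writeback: bytes reclaimed on this pass. */
static long try_reclaim(void)
{
        static int calls;

        /* Pretend every third pass frees 256 KiB. */
        return (++calls % 3 == 0) ? 256 * 1024 : 0;
}

static long shrink_until(long to_reclaim)
{
        long reclaimed = 0;
        int loops = 0;
        int pause_ms = 1;               /* analogue of "int pause = 1" (one jiffy) */
        const int max_pause_ms = 100;   /* analogue of HZ / 10 */

        while (loops < 1024 && reclaimed < to_reclaim) {
                long got = try_reclaim();

                if (got > 0) {
                        loops = 0;      /* progress: reset the give-up counter */
                        reclaimed += got;
                        continue;
                }
                loops++;                /* no progress on this pass */

                /* Back off, doubling the pause up to the cap. */
                struct timespec ts = { 0, pause_ms * 1000000L };
                nanosleep(&ts, NULL);
                pause_ms <<= 1;
                if (pause_ms > max_pause_ms)
                        pause_ms = max_pause_ms;
        }
        return reclaimed;
}

int main(void)
{
        printf("reclaimed %ld bytes\n", shrink_until(1 << 20));
        return 0;
}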
@@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
 
         if (num_bytes > 0) {
                 if (dest) {
-                        block_rsv_add_bytes(dest, num_bytes, 0);
-                } else {
+                        spin_lock(&dest->lock);
+                        if (!dest->full) {
+                                u64 bytes_to_add;
+
+                                bytes_to_add = dest->size - dest->reserved;
+                                bytes_to_add = min(num_bytes, bytes_to_add);
+                                dest->reserved += bytes_to_add;
+                                if (dest->reserved >= dest->size)
+                                        dest->full = 1;
+                                num_bytes -= bytes_to_add;
+                        }
+                        spin_unlock(&dest->lock);
+                }
+                if (num_bytes) {
                         spin_lock(&space_info->lock);
                         space_info->bytes_reserved -= num_bytes;
                         spin_unlock(&space_info->lock);
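The block_rsv_release_bytes() hunk stops handing the whole refund to the destination reserve: dest is topped up only to dest->size, and whatever is left over is released back to the space_info accounting. Below is a small self-contained sketch of just that arithmetic, with simplified structs standing in for btrfs_block_rsv and btrfs_space_info and the spinlocks omitted.

#include <stdio.h>

/* Simplified stand-ins for btrfs_block_rsv / btrfs_space_info. */
struct rsv { unsigned long long size, reserved; int full; };
struct space_info { unsigned long long bytes_reserved; };

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

/* Give num_bytes back: fill dest only up to dest->size, release the rest. */
static void release_bytes(struct rsv *dest, struct space_info *sinfo,
                          unsigned long long num_bytes)
{
        if (dest && !dest->full) {
                unsigned long long bytes_to_add = dest->size - dest->reserved;

                bytes_to_add = min_u64(num_bytes, bytes_to_add);
                dest->reserved += bytes_to_add;
                if (dest->reserved >= dest->size)
                        dest->full = 1;
                num_bytes -= bytes_to_add;
        }
        if (num_bytes)          /* whatever dest did not need is truly freed */
                sinfo->bytes_reserved -= num_bytes;
}

int main(void)
{
        struct rsv global = { .size = 100, .reserved = 90, .full = 0 };
        struct space_info sinfo = { .bytes_reserved = 500 };

        release_bytes(&global, &sinfo, 64);
        /* dest takes only 10 of the 64 bytes; the other 54 leave bytes_reserved */
        printf("reserved=%llu full=%d space reserved=%llu\n",
               global.reserved, global.full, sinfo.bytes_reserved);
        return 0;
}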
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
         num_bytes = ALIGN(num_bytes, root->sectorsize);
         atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+        WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
         spin_lock(&BTRFS_I(inode)->accounting_lock);
         nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
               struct btrfs_root *root, u32 blocksize)
 {
         struct btrfs_block_rsv *block_rsv;
+        struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
         int ret;
 
         block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
         if (block_rsv->size == 0) {
                 ret = reserve_metadata_bytes(trans, root, block_rsv,
                                              blocksize, 0);
-                if (ret)
+                /*
+                 * If we couldn't reserve metadata bytes try and use some from
+                 * the global reserve.
+                 */
+                if (ret && block_rsv != global_rsv) {
+                        ret = block_rsv_use_bytes(global_rsv, blocksize);
+                        if (!ret)
+                                return global_rsv;
                         return ERR_PTR(ret);
+                } else if (ret) {
+                        return ERR_PTR(ret);
+                }
                 return block_rsv;
         }
 
         ret = block_rsv_use_bytes(block_rsv, blocksize);
         if (!ret)
                 return block_rsv;
+        if (ret) {
+                WARN_ON(1);
+                ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+                                             0);
+                if (!ret) {
+                        spin_lock(&block_rsv->lock);
+                        block_rsv->size += blocksize;
+                        spin_unlock(&block_rsv->lock);
+                        return block_rsv;
+                } else if (ret && block_rsv != global_rsv) {
+                        ret = block_rsv_use_bytes(global_rsv, blocksize);
+                        if (!ret)
+                                return global_rsv;
+                }
+        }
 
         return ERR_PTR(-ENOSPC);
 }
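The use_block_rsv() changes make tree-block allocation fall back to the global reserve when the transaction's own reserve cannot be refilled. The sketch below models only that decision order; reserve_bytes() and use_bytes() are invented placeholders for reserve_metadata_bytes() and block_rsv_use_bytes(), and the real function additionally grows block_rsv->size when a late reservation succeeds.

#include <errno.h>
#include <stdio.h>

struct rsv { long long size, reserved; };

/* Stand-in for block_rsv_use_bytes(): take blocksize if it is available. */
static int use_bytes(struct rsv *r, long long blocksize)
{
        if (r->reserved >= blocksize) {
                r->reserved -= blocksize;
                return 0;
        }
        return -ENOSPC;
}

/* Stand-in for reserve_metadata_bytes(): pretend reservation always fails. */
static int reserve_bytes(struct rsv *r, long long blocksize)
{
        (void)r; (void)blocksize;
        return -ENOSPC;
}

/*
 * Pick which reserve pays for one tree block: try the transaction's own
 * reserve first, then fall back to the global reserve, mirroring the
 * use_block_rsv() change above.
 */
static struct rsv *pick_rsv(struct rsv *block_rsv, struct rsv *global_rsv,
                            long long blocksize)
{
        if (!use_bytes(block_rsv, blocksize))
                return block_rsv;
        if (!reserve_bytes(block_rsv, blocksize))
                return block_rsv;
        if (block_rsv != global_rsv && !use_bytes(global_rsv, blocksize))
                return global_rsv;
        return NULL;            /* caller maps this to -ENOSPC */
}

int main(void)
{
        struct rsv trans_rsv = { .size = 4096, .reserved = 0 };
        struct rsv global_rsv = { .size = 65536, .reserved = 65536 };
        struct rsv *used = pick_rsv(&trans_rsv, &global_rsv, 4096);

        printf("paid from %s reserve\n",
               used == &global_rsv ? "global" : used ? "transaction" : "no");
        return 0;
}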
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
         BUG_ON(!wc);
 
         trans = btrfs_start_transaction(tree_root, 0);
+        BUG_ON(IS_ERR(trans));
+
         if (block_rsv)
                 trans->block_rsv = block_rsv;
 
@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 
                 btrfs_end_transaction_throttle(trans, tree_root);
                 trans = btrfs_start_transaction(tree_root, 0);
+                BUG_ON(IS_ERR(trans));
                 if (block_rsv)
                         trans->block_rsv = block_rsv;
         }
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
         int ret = 0;
 
         ra = kzalloc(sizeof(*ra), GFP_NOFS);
+        if (!ra)
+                return -ENOMEM;
 
         mutex_lock(&inode->i_mutex);
         first_index = start >> PAGE_CACHE_SHIFT;
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
         BUG_ON(reloc_root->commit_root != NULL);
         while (1) {
                 trans = btrfs_join_transaction(root, 1);
-                BUG_ON(!trans);
+                BUG_ON(IS_ERR(trans));
 
                 mutex_lock(&root->fs_info->drop_mutex);
                 ret = btrfs_drop_snapshot(trans, reloc_root);
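The BUG_ON(!trans) checks replaced here never fired because btrfs_join_transaction() and btrfs_start_transaction() report failure as an ERR_PTR()-encoded pointer, not as NULL, so IS_ERR() is the correct test. The following sketch models that convention in userspace with simplified ERR_PTR/IS_ERR/PTR_ERR helpers (not the kernel's err.h) and an invented join_transaction() stub.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace approximations of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO       4095

static void *ERR_PTR(long err)        { return (void *)err; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static int   IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct trans_handle { int id; };

/* Invented stand-in for btrfs_join_transaction(): fails with an encoded errno. */
static struct trans_handle *join_transaction(int fail)
{
        if (fail)
                return ERR_PTR(-ENOMEM);       /* never NULL on error */
        struct trans_handle *t = malloc(sizeof(*t));
        if (!t)
                return ERR_PTR(-ENOMEM);
        t->id = 1;
        return t;
}

int main(void)
{
        struct trans_handle *trans = join_transaction(1);

        /* Checking !trans would miss this failure; IS_ERR() catches it. */
        if (IS_ERR(trans)) {
                printf("join failed: %ld\n", PTR_ERR(trans));
                return 1;
        }
        free(trans);
        return 0;
}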
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
 
         if (found) {
                 trans = btrfs_start_transaction(root, 1);
-                BUG_ON(!trans);
+                BUG_ON(IS_ERR(trans));
                 ret = btrfs_commit_transaction(trans, root);
                 BUG_ON(ret);
         }
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
 
 
         trans = btrfs_start_transaction(extent_root, 1);
-        BUG_ON(!trans);
+        BUG_ON(IS_ERR(trans));
 
         if (extent_key->objectid == 0) {
                 ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                 if (block_group->cached == BTRFS_CACHE_STARTED)
                         wait_block_group_cache_done(block_group);
 
+                /*
+                 * We haven't cached this block group, which means we could
+                 * possibly have excluded extents on this block group.
+                 */
+                if (block_group->cached == BTRFS_CACHE_NO)
+                        free_excluded_extents(info->extent_root, block_group);
+
                 btrfs_remove_free_space_cache(block_group);
                 btrfs_put_block_group(block_group);
 
@@ -8384,6 +8443,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                 cache->flags = btrfs_block_group_flags(&cache->item);
                 cache->sectorsize = root->sectorsize;
 
+                /*
+                 * We need to exclude the super stripes now so that the space
+                 * info has super bytes accounted for, otherwise we'll think
+                 * we have more space than we actually do.
+                 */
+                exclude_super_stripes(root, cache);
+
                 /*
                  * check for two cases, either we are full, and therefore
                  * don't need to bother with the caching work since we won't
@@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                  * time, particularly in the full case.
                  */
                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-                        exclude_super_stripes(root, cache);
                         cache->last_byte_to_unpin = (u64)-1;
                         cache->cached = BTRFS_CACHE_FINISHED;
                         free_excluded_extents(root, cache);
                 } else if (btrfs_block_group_used(&cache->item) == 0) {
-                        exclude_super_stripes(root, cache);
                         cache->last_byte_to_unpin = (u64)-1;
                         cache->cached = BTRFS_CACHE_FINISHED;
                         add_new_free_space(cache, root->fs_info,
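The last two hunks move exclude_super_stripes() ahead of the caching decision in btrfs_read_block_groups(), so the bytes pinned by superblock copies are already subtracted before any free-space math is done. A toy calculation with made-up sizes illustrates the overstatement the in-code comment warns about:

#include <stdio.h>

int main(void)
{
        /* Made-up block group; none of these numbers come from btrfs. */
        unsigned long long total = 1024ULL << 20;       /* 1 GiB block group */
        unsigned long long used = 256ULL << 20;         /* 256 MiB allocated */
        unsigned long long bytes_super = 128ULL << 10;  /* superblock copies */

        printf("free without excluding super stripes: %llu\n", total - used);
        printf("free after excluding super stripes:   %llu\n",
               total - used - bytes_super);
        return 0;
}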