Btrfs: Add a per-inode csum mutex to avoid races creating csum items
Signed-off-by: Chris Mason <chris.mason@oracle.com>
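
The pattern the diff introduces is a per-inode lock: csum_mutex is initialized wherever an in-memory btrfs inode is set up, and the bio completion path takes it around btrfs_csum_file_blocks() so two writes finishing against the same inode cannot race while creating csum items. A minimal userspace sketch of the same idea, assuming plain C with pthreads (my_inode, inode_init and csum_insert are made-up names for illustration, not btrfs code):

/*
 * Userspace analogue of the per-inode csum_mutex (illustrative only;
 * my_inode and csum_insert are hypothetical names, not btrfs API).
 */
#include <pthread.h>
#include <stdio.h>

struct my_inode {
	pthread_mutex_t csum_mutex;	/* serializes csum item creation */
	unsigned long csum_items;	/* stand-in for the inode's csum items */
};

static void inode_init(struct my_inode *inode)
{
	/* mirrors mutex_init(&BTRFS_I(inode)->csum_mutex) at inode setup */
	pthread_mutex_init(&inode->csum_mutex, NULL);
	inode->csum_items = 0;
}

static void csum_insert(struct my_inode *inode)
{
	/* mirrors the lock/unlock pair around btrfs_csum_file_blocks() */
	pthread_mutex_lock(&inode->csum_mutex);
	inode->csum_items++;		/* the update being protected */
	pthread_mutex_unlock(&inode->csum_mutex);
}

static void *writer(void *arg)
{
	for (int i = 0; i < 100000; i++)
		csum_insert(arg);
	return NULL;
}

int main(void)
{
	struct my_inode inode;
	pthread_t a, b;

	inode_init(&inode);
	pthread_create(&a, NULL, writer, &inode);
	pthread_create(&b, NULL, writer, &inode);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("csum items: %lu\n", inode.csum_items);	/* always 200000 */
	return 0;
}

Build with cc -pthread; without the lock/unlock pair the final count would be unreliable under contention, which is the race the per-inode mutex closes.
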
fs/btrfs/btrfs_inode.h
@@ -30,6 +30,7 @@ struct btrfs_inode {
 	struct extent_map_tree extent_tree;
 	struct extent_io_tree io_tree;
 	struct extent_io_tree io_failure_tree;
+	struct mutex csum_mutex;
 	struct inode vfs_inode;
 	atomic_t ordered_writeback;
fs/btrfs/file.c
@@ -267,13 +267,13 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 
 	/* FIXME...EIEIO, ENOSPC and more */
 	/* insert any holes we need to create */
-	if (isize < end_pos) {
+	if (isize < start_pos) {
 		u64 last_pos_in_file;
 		u64 hole_size;
 		u64 mask = root->sectorsize - 1;
 		last_pos_in_file = (isize + mask) & ~mask;
-		hole_size = (end_pos - last_pos_in_file + mask) & ~mask;
-		if (last_pos_in_file < end_pos) {
+		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
+		if (last_pos_in_file < start_pos) {
 			err = btrfs_drop_extents(trans, root, inode,
 						 last_pos_in_file,
 						 last_pos_in_file + hole_size,
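
The hunk above narrows the hole that gets inserted: it now ends at start_pos, where the new write begins, instead of end_pos, and both bounds are rounded with the sector mask. A standalone check of that round-up arithmetic, with example values chosen only for illustration:

/* Round-up alignment used when sizing the hole (example values only). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t mask = sectorsize - 1;
	uint64_t isize = 10000;		/* old file size */
	uint64_t start_pos = 20480;	/* start of the new write */

	/* last_pos_in_file = (isize + mask) & ~mask: isize rounded up */
	uint64_t last_pos_in_file = (isize + mask) & ~mask;
	/* hole covers last_pos_in_file..start_pos, rounded up to sectors */
	uint64_t hole_size = (start_pos - last_pos_in_file + mask) & ~mask;

	printf("last_pos_in_file=%llu hole_size=%llu\n",
	       (unsigned long long)last_pos_in_file,
	       (unsigned long long)hole_size);
	/* prints last_pos_in_file=12288 hole_size=8192 */
	return 0;
}

With a 4K sector size, an old i_size of 10000 rounds up to 12288, and the hole out to the write at 20480 comes to 8192 bytes (two sectors).
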
fs/btrfs/inode.c
@@ -351,7 +351,9 @@ int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 	trans = btrfs_start_transaction(root, 1);
 
 	btrfs_set_trans_block_group(trans, inode);
+	mutex_lock(&BTRFS_I(inode)->csum_mutex);
 	btrfs_csum_file_blocks(trans, root, inode, bio, sums);
+	mutex_unlock(&BTRFS_I(inode)->csum_mutex);
 
 	ret = btrfs_end_transaction(trans, root);
 	BUG_ON(ret);
@@ -1400,6 +1402,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
 			     inode->i_mapping, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
 			     inode->i_mapping, GFP_NOFS);
+	mutex_init(&BTRFS_I(inode)->csum_mutex);
 	atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
 	return 0;
 }
@@ -1701,6 +1704,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 			     inode->i_mapping, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
 			     inode->i_mapping, GFP_NOFS);
+	mutex_init(&BTRFS_I(inode)->csum_mutex);
 	atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
 	BTRFS_I(inode)->delalloc_bytes = 0;
 	BTRFS_I(inode)->root = root;
@@ -1924,6 +1928,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 			     inode->i_mapping, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
 			     inode->i_mapping, GFP_NOFS);
+	mutex_init(&BTRFS_I(inode)->csum_mutex);
 	BTRFS_I(inode)->delalloc_bytes = 0;
 	atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
@@ -2862,6 +2867,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 			     inode->i_mapping, GFP_NOFS);
 	extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
 			     inode->i_mapping, GFP_NOFS);
+	mutex_init(&BTRFS_I(inode)->csum_mutex);
 	BTRFS_I(inode)->delalloc_bytes = 0;
 	atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
 	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
fs/btrfs/ordered-data.c
@@ -152,12 +152,13 @@ int btrfs_add_ordered_inode(struct inode *inode)
 			   inode->i_ino, &entry->rb_node);
 
 	BTRFS_I(inode)->ordered_trans = transid;
+	if (!node)
+		igrab(inode);
 
 	write_unlock(&tree->lock);
 
 	if (node)
 		kfree(entry);
-	else
-		igrab(inode);
 	return 0;
 }
fs/btrfs/transaction.c
@@ -477,7 +477,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
 		if (err)
 			ret = err;
 		nr = trans->blocks_used;
-		ret = btrfs_end_transaction(trans, tree_root);
+		ret = btrfs_end_transaction_throttle(trans, tree_root);
 		BUG_ON(ret);
 
 		mutex_unlock(&root->fs_info->drop_mutex);
fs/btrfs/tree-defrag.c
@@ -38,8 +38,15 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 	u64 last_ret = 0;
 
 	if (root->fs_info->extent_root == root) {
+		/*
+		 * there's recursion here right now in the tree locking,
+		 * we can't defrag the extent root without deadlock
+		 */
+		goto out;
+#if 0
 		mutex_lock(&root->fs_info->alloc_mutex);
 		is_extent = 1;
+#endif
 	}
 
 	if (root->ref_cows == 0 && !is_extent)