fs/btrfs/extent-tree.c +6 −2

@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			spin_unlock(&locked_ref->lock);
-			btrfs_delayed_ref_unlock(locked_ref);
 			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			cond_resched();
 			count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 				 */
 				if (must_insert_reserved)
 					locked_ref->must_insert_reserved = 1;
+				spin_lock(&delayed_refs->lock);
 				locked_ref->processing = 0;
+				delayed_refs->num_heads_ready++;
+				spin_unlock(&delayed_refs->lock);
 				btrfs_debug(fs_info,
 					    "run_delayed_extent_op returned %d",
 					    ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
 		spin_unlock(&cluster->refill_lock);
 
-		down_read(&used_bg->data_rwsem);
+		/* We should only have one-level nested. */
+		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
 
 		spin_lock(&cluster->refill_lock);
 		if (used_bg == cluster->block_group)
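Note on the btrfs_lock_cluster() hunk: it swaps a plain down_read() for down_read_nested() so lockdep accepts taking a second data_rwsem of the same lock class while one is already held. The fragment below is only an illustrative sketch of that annotation pattern, not btrfs code: the demo_group type and demo_read_nested() function are invented for the example, while down_read_nested() and SINGLE_DEPTH_NESTING are the real kernel primitives.

#include <linux/rwsem.h>
#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */

/* Hypothetical object; every instance's rwsem shares one lockdep class. */
struct demo_group {
	struct rw_semaphore data_rwsem;
};

/*
 * Take @inner->data_rwsem while @outer->data_rwsem is already held for
 * read.  Without the subclass annotation, lockdep would flag this as a
 * possible recursive deadlock because both locks belong to the same
 * class; SINGLE_DEPTH_NESTING says exactly one level of nesting is
 * expected, which is the assumption the patched hunk also documents.
 */
static void demo_read_nested(struct demo_group *outer, struct demo_group *inner)
{
	down_read(&outer->data_rwsem);
	down_read_nested(&inner->data_rwsem, SINGLE_DEPTH_NESTING);

	/* ... read data guarded by both semaphores ... */

	up_read(&inner->data_rwsem);
	up_read(&outer->data_rwsem);
}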
fs/btrfs/inode.c +9 −2

@@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
 	 * within our reservation, otherwise we need to adjust our inode
 	 * counter appropriately.
 	 */
-	if (dio_data->outstanding_extents) {
+	if (dio_data->outstanding_extents >= num_extents) {
 		dio_data->outstanding_extents -= num_extents;
 	} else {
+		/*
+		 * If dio write length has been split due to no large enough
+		 * contiguous space, we need to compensate our inode counter
+		 * appropriately.
+		 */
+		u64 num_needed = num_extents - dio_data->outstanding_extents;
+
 		spin_lock(&BTRFS_I(inode)->lock);
-		BTRFS_I(inode)->outstanding_extents += num_extents;
+		BTRFS_I(inode)->outstanding_extents += num_needed;
 		spin_unlock(&BTRFS_I(inode)->lock);
 	}
 }
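A quick, hedged illustration of the arithmetic in the else branch above: when the up-front reservation covered fewer extents than the split direct I/O write ends up needing, only the shortfall (num_needed) is added to the inode's outstanding_extents, not the full num_extents. The snippet is a standalone userspace mock-up mirroring the hunk; the struct names and the adjust() helper are invented, and the in-kernel locking is omitted.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct demo_dio_data { uint64_t outstanding_extents; };
struct demo_inode    { uint64_t outstanding_extents; };

static void adjust(struct demo_dio_data *dio, struct demo_inode *inode,
		   uint64_t num_extents)
{
	if (dio->outstanding_extents >= num_extents) {
		/* The write still fits within the up-front reservation. */
		dio->outstanding_extents -= num_extents;
	} else {
		/*
		 * The split write needs more extents than were reserved;
		 * charge only the shortfall to the inode, as in the hunk.
		 */
		uint64_t num_needed = num_extents - dio->outstanding_extents;

		inode->outstanding_extents += num_needed;
	}
}

int main(void)
{
	struct demo_dio_data dio = { .outstanding_extents = 3 };
	struct demo_inode ino = { .outstanding_extents = 0 };

	adjust(&dio, &ino, 5);	/* reserved 3 extents, the split write used 5 */
	printf("inode outstanding_extents = %llu\n",
	       (unsigned long long)ino.outstanding_extents);	/* prints 2 */
	return 0;
}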
fs/btrfs/tree-log.c +10 −3

@@ -37,6 +37,7 @@
  */
 #define LOG_INODE_ALL 0
 #define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
 
 /*
  * directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 	if (S_ISDIR(inode->i_mode) ||
 	    (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
 		       &BTRFS_I(inode)->runtime_flags) &&
-	     inode_only == LOG_INODE_EXISTS))
+	     inode_only >= LOG_INODE_EXISTS))
 		max_key.type = BTRFS_XATTR_ITEM_KEY;
 	else
 		max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 		return ret;
 	}
 
-	mutex_lock(&BTRFS_I(inode)->log_mutex);
+	if (inode_only == LOG_OTHER_INODE) {
+		inode_only = LOG_INODE_EXISTS;
+		mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+				  SINGLE_DEPTH_NESTING);
+	} else {
+		mutex_lock(&BTRFS_I(inode)->log_mutex);
+	}
 
 	/*
 	 * a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 			 * unpin it.
 			 */
 			err = btrfs_log_inode(trans, root, other_inode,
-					      LOG_INODE_EXISTS,
+					      LOG_OTHER_INODE,
 					      0, LLONG_MAX, ctx);
 			iput(other_inode);
 			if (err)
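Note on the LOG_OTHER_INODE change: when btrfs_log_inode() is re-entered for a second inode, both inodes' log_mutex locks belong to the same lockdep class, so the inner lock is taken with mutex_lock_nested() and SINGLE_DEPTH_NESTING. The sketch below only illustrates that locking idiom; demo_inode and demo_log_pair() are made-up names, while mutex_lock_nested() and SINGLE_DEPTH_NESTING are the actual kernel interfaces.

#include <linux/mutex.h>
#include <linux/lockdep.h>	/* SINGLE_DEPTH_NESTING */

/* Hypothetical object; every instance's log_mutex shares one lockdep class. */
struct demo_inode {
	struct mutex log_mutex;
};

/*
 * Hold @primary's log_mutex and then @other's.  Annotating the second
 * acquisition with SINGLE_DEPTH_NESTING keeps lockdep from reporting a
 * same-class recursion -- the same idea the LOG_OTHER_INODE mode uses to
 * pick mutex_lock_nested() in the hunk above.
 */
static void demo_log_pair(struct demo_inode *primary, struct demo_inode *other)
{
	mutex_lock(&primary->log_mutex);
	mutex_lock_nested(&other->log_mutex, SINGLE_DEPTH_NESTING);

	/* ... write log items for both inodes ... */

	mutex_unlock(&other->log_mutex);
	mutex_unlock(&primary->log_mutex);
}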
fs/btrfs/uuid-tree.c +1 −3

@@ -352,7 +352,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
 
 out:
 	btrfs_free_path(path);
-	if (ret)
-		btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-	return 0;
+	return ret;
 }