fs/btrfs/Makefile: +3 −2

Build the new free space tree code and its sanity tests:

@@ -9,11 +9,12 @@
 btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
-	   uuid-tree.o props.o hash.o
+	   uuid-tree.o props.o hash.o free-space-tree.o

 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
 btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
	tests/extent-buffer-tests.o tests/btrfs-tests.o \
-	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o
+	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
+	tests/free-space-tree-tests.o

fs/btrfs/ctree.h: +155 −2

On-disk definitions for the free space tree. The tree gets its own root objectid (right after the UUID tree's 9ULL), a read-only-compat feature bit placed just above the incompat bits, and a packed accounting item:

/* tracks free space in block groups. */
#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL

#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE	(1ULL << 0)

struct btrfs_free_space_info {
	__le32 extent_count;
	__le32 flags;
} __attribute__ ((__packed__));

#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)

BTRFS_FEATURE_COMPAT_RO_SUPP, previously 0ULL, now advertises the new bit:

#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL

Three new item key types describe the tree's contents:

/*
 * Every block group is represented in the free space tree by a free space info
 * item, which stores some accounting information. It is keyed on
 * (block_group_start, FREE_SPACE_INFO, block_group_length).
 */
#define BTRFS_FREE_SPACE_INFO_KEY 198

/*
 * A free space extent tracks an extent of space that is free in a block group.
 * It is keyed on (start, FREE_SPACE_EXTENT, length).
 */
#define BTRFS_FREE_SPACE_EXTENT_KEY 199

/*
 * When a block group becomes very fragmented, we convert it to use bitmaps
 * instead of extents. A free space bitmap is keyed on
 * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with
 * (length / sectorsize) bits.
 */
#define BTRFS_FREE_SPACE_BITMAP_KEY 200

Accessors for the new item and a named constant for the caching thread's wake-up batching:

/* struct btrfs_free_space_info */
BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
		   extent_count, 32);
BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);

/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)

struct btrfs_block_group_cache grows per-block-group state: sectorsize shrinks from u64 to u32 and moves next to the new bitmap conversion thresholds, and a mutex plus a flag track the block group's presence in the free space tree:

	u64 cache_generation;
	u32 sectorsize;

	/*
	 * If the free space extent count exceeds this number, convert the
	 * block group to bitmaps.
	 */
	u32 bitmap_high_thresh;

	/*
	 * If the free space extent count drops below this number, convert the
	 * block group back to extents.
	 */
	u32 bitmap_low_thresh;

	...

	/* Lock for free space tree operations. */
	struct mutex free_space_lock;

	/*
	 * Does the block group need to be added to the free space tree?
	 * Protected by free_space_lock.
	 */
	int needs_free_space;

struct btrfs_fs_info gains a struct btrfs_root *free_space_root alongside the other tree roots, and free_fs_info() frees it with kfree(fs_info->free_space_root). A new mount option bit, BTRFS_MOUNT_FREE_SPACE_TREE (1 << 26), follows the FRAGMENT_METADATA option. Finally, add_new_free_space() is now declared here so it can be called from the free space tree code:

u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end);
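To get a feel for the bitmap format described by the BTRFS_FREE_SPACE_BITMAP_KEY comment, the sketch below computes how large a bitmap item is for a given block group: one bit per sectorsize unit of the keyed length. This is a standalone userspace illustration; the block group and sector sizes are made-up inputs, and the crossover point against plain extent items (which bitmap_high_thresh/bitmap_low_thresh encode) depends on leaf item overhead that is not part of this header, so it is not computed here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative values only; any block group and sector size works */
	uint64_t block_group_len = 1024ULL * 1024 * 1024;	/* 1 GiB */
	uint32_t sectorsize = 4096;

	/* a FREE_SPACE_BITMAP item carries (length / sectorsize) bits */
	uint64_t bits = block_group_len / sectorsize;
	uint64_t bytes = (bits + 7) / 8;		/* round up to bytes */

	printf("%llu bits -> %llu bytes of bitmap per 1 GiB block group\n",
	       (unsigned long long)bits, (unsigned long long)bytes);
	return 0;
}

For a 1 GiB block group with 4 KiB sectors this comes out to 262144 bits, i.e. 32 KiB of bitmap, which is why bitmaps only pay off once a block group is fragmented into many small free extents.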
The header also gains the superblock flag helpers the new code needs. btrfs_clear_fs_incompat()/__btrfs_clear_fs_incompat() complement the existing set helper, and a full set/clear/test trio is added for compat_ro flags. All of them follow the same shape: an unlocked check of the in-memory superblock copy, then a re-check and update under fs_info->super_lock:

#define btrfs_clear_fs_incompat(__fs_info, opt) \
	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)

static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
					     u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_incompat_flags(disk_super);
	if (features & flag) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_incompat_flags(disk_super);
		if (features & flag) {
			features &= ~flag;
			btrfs_set_super_incompat_flags(disk_super, features);
			btrfs_info(fs_info, "clearing %llu feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

#define btrfs_set_fs_compat_ro(__fs_info, opt) \
	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
					    u64 flag)
{
	struct btrfs_super_block *disk_super;
	u64 features;

	disk_super = fs_info->super_copy;
	features = btrfs_super_compat_ro_flags(disk_super);
	if (!(features & flag)) {
		spin_lock(&fs_info->super_lock);
		features = btrfs_super_compat_ro_flags(disk_super);
		if (!(features & flag)) {
			features |= flag;
			btrfs_set_super_compat_ro_flags(disk_super, features);
			btrfs_info(fs_info, "setting %llu ro feature flag",
				   flag);
		}
		spin_unlock(&fs_info->super_lock);
	}
}

btrfs_clear_fs_compat_ro()/__btrfs_clear_fs_compat_ro() mirror the set helper with the checks inverted, clearing the bit and logging "clearing %llu ro feature flag". The test helper is a plain read:

#define btrfs_fs_compat_ro(fs_info, opt) \
	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)

static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
{
	struct btrfs_super_block *disk_super;

	disk_super = fs_info->super_copy;
	return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
}
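The helpers above all share a check / lock / re-check shape. Here is a minimal userspace sketch of that pattern, with a pthread mutex standing in for fs_info->super_lock and a plain u64 standing in for the superblock flag word; the names are illustrative stand-ins, not btrfs API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t compat_ro_flags;
static pthread_mutex_t super_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_compat_ro(uint64_t flag)
{
	if (!(compat_ro_flags & flag)) {	/* cheap unlocked check */
		pthread_mutex_lock(&super_lock);
		if (!(compat_ro_flags & flag)) {	/* re-check under lock */
			compat_ro_flags |= flag;
			printf("setting %llu ro feature flag\n",
			       (unsigned long long)flag);
		}
		pthread_mutex_unlock(&super_lock);
	}
}

int main(void)
{
	set_compat_ro(1ULL << 0);	/* sets the bit and logs once */
	set_compat_ro(1ULL << 0);	/* already set: no lock, no log */
	return 0;
}

The unlocked first check keeps the common already-set case off the lock; the second check under the lock makes the update idempotent when two writers race.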
fs/btrfs/disk-io.c: +38 −0

The new tree is wired into the generic root handling. The file includes "free-space-tree.h", btrfs_get_fs_root() learns the new objectid (alongside the existing UUID tree case), and free_root_pointers() releases the root's extent buffers with free_root_extent_buffers(info->free_space_root):

	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
		return fs_info->free_space_root ? fs_info->free_space_root :
						  ERR_PTR(-ENOENT);

btrfs_read_roots() loads the tree when the compat_ro bit says it exists:

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
		root = btrfs_read_tree_root(tree_root, &location);
		if (IS_ERR(root))
			return PTR_ERR(root);
		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
		fs_info->free_space_root = root;
	}

	return 0;

Finally, open_ctree() handles the mount options right before the UUID tree is created: clear_cache tears down an existing free space tree, and the free space tree option builds one if the filesystem does not have it yet. Either failure closes the filesystem and fails the mount:

	if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		pr_info("BTRFS: clearing free space tree\n");
		ret = btrfs_clear_free_space_tree(fs_info);
		if (ret) {
			pr_warn("BTRFS: failed to clear free space tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	}

	if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
		pr_info("BTRFS: creating free space tree\n");
		ret = btrfs_create_free_space_tree(fs_info);
		if (ret) {
			pr_warn("BTRFS: failed to create free space tree %d\n",
				ret);
			close_ctree(tree_root);
			return ret;
		}
	}
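The two blocks encode a small decision table, sketched below with booleans standing in for the mount options and the compat_ro bit; the helper name and types are made up for illustration. Whether clearing the tree also drops the compat_ro bit (so that a clear can be followed by a rebuild on the same mount) happens inside btrfs_clear_free_space_tree() and is not visible in this hunk.

#include <stdbool.h>
#include <stdio.h>

/* illustrative only: mirrors the two checks added to open_ctree() */
static void fst_mount_decision(bool clear_cache_opt, bool fst_opt,
			       bool compat_ro_fst,
			       bool *do_clear, bool *do_create)
{
	*do_clear = clear_cache_opt && compat_ro_fst;
	*do_create = fst_opt && !compat_ro_fst;
}

int main(void)
{
	bool clear, create;

	/* fresh filesystem mounted with the free space tree option */
	fst_mount_decision(false, true, false, &clear, &create);
	printf("no tree yet, fst option:    clear=%d create=%d\n", clear, create);

	/* existing tree, user asked for clear_cache */
	fst_mount_decision(true, false, true, &clear, &create);
	printf("tree exists, clear_cache:   clear=%d create=%d\n", clear, create);
	return 0;
}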
fs/btrfs/extent-tree.c: +71 −30

The file includes "free-space-tree.h". add_new_free_space() loses its static qualifier so the free space tree code can call it (it is now declared in ctree.h); its behavior is unchanged: it walks [start, end), skips ranges that are pinned or excluded because their space only becomes usable once the transaction commits, and returns how much it added.

caching_thread() is split. The extent tree scan moves into a new helper, load_extent_tree_free(), which returns an error instead of open-coding the failure path: the initial mutex_lock(&caching_ctl->mutex) and down_read(&fs_info->commit_root_sem) move to the caller, the periodic drop-and-retake of those locks jumps back to a next: label instead of the old again: label, search failures go to a single out: label that frees the path and returns ret, and the magic 2 MiB wake-up literal becomes CACHING_CTL_WAKE_UP:

		if (total_found > CACHING_CTL_WAKE_UP) {
			total_found = 0;
			if (wakeup)
				wake_up(&caching_ctl->wait);
		}

The caching thread itself becomes a thin dispatcher: it takes the locks, loads free space from the free space tree when the compat_ro bit is set and from the extent tree otherwise, and records success or failure in the block group:

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	/* free space fragmentation debug hook, elided in this hunk */
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info->extent_root, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);

In cache_block_group(), the comment on the non-fast path now reads: "We're either using the free space tree or no caching at all. Set cached to the appropriate value and wakeup any waiters."

Allocation and freeing keep the tree in sync. __btrfs_free_extent() adds the freed range back before updating the block group and aborts the transaction on failure:

	ret = add_to_free_space_tree(trans, root->fs_info, bytenr, num_bytes);
	if (ret) {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	ret = update_block_group(trans, root, bytenr, num_bytes, 0);

alloc_reserved_file_extent() and alloc_reserved_tree_block() do the inverse just before their update_block_group() calls, removing (ins->objectid, ins->offset) and (ins->objectid, num_bytes) respectively with remove_from_free_space_tree() and returning the error if it fails.

Block group lifecycle hooks: btrfs_create_block_group_cache() calls set_free_space_tree_thresholds(cache) and initializes cache->free_space_lock with mutex_init(); btrfs_make_block_group() marks newly created block groups with cache->needs_free_space = 1; btrfs_create_pending_block_groups() calls add_block_group_free_space(trans, root->fs_info, block_group), which has already aborted the transaction if it failed; and btrfs_remove_block_group() calls remove_block_group_free_space(trans, root->fs_info, block_group) and bails out on error before dropping its block group references.
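The CACHING_CTL_WAKE_UP batching is easy to model in isolation. Below is a toy userspace sketch, not btrfs code: found_free_space() stands in for the scan loop's accounting and wake_waiters() for wake_up(&caching_ctl->wait); the 256 KiB hole size and the iteration count are arbitrary examples.

#include <stdio.h>

#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)

static unsigned long long total_found;

static void wake_waiters(void)
{
	printf("wake up waiters\n");
}

/* accumulate free space found by the scan; wake in 2 MiB batches */
static void found_free_space(unsigned long long bytes)
{
	total_found += bytes;
	if (total_found > CACHING_CTL_WAKE_UP) {
		total_found = 0;
		wake_waiters();
	}
}

int main(void)
{
	/* thirty-two 256 KiB holes produce three wake-ups, not thirty-two */
	for (int i = 0; i < 32; i++)
		found_free_space(256 * 1024);
	return 0;
}

Allocators waiting on the caching control are therefore woken roughly every 2 MiB of discovered free space rather than once per free extent found.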
fs/btrfs/extent_io.c: +170 −13

Two changes. First, alloc_dummy_extent_buffer() is split so callers can ask for an arbitrary length: the allocation loop moves into __alloc_dummy_extent_buffer(fs_info, start, len), which now computes num_pages from num_extent_pages(start, len) rather than (0, len), while alloc_dummy_extent_buffer() keeps the existing "tests without an fs_info assume a 4096-byte nodesize" fallback and calls the new helper:

struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start)
{
	unsigned long len;

	if (!fs_info) {
		/*
		 * Called only from tests that don't always have a fs_info
		 * available, but we know that nodesize is 4096
		 */
		len = 4096;
	} else {
		len = fs_info->tree_root->nodesize;
	}

	return __alloc_dummy_extent_buffer(fs_info, start, len);
}

Second, extent buffers grow bitmap operations. They work a byte at a time because a bitmap item is not guaranteed to be word aligned, so a single word could straddle two pages of the extent buffer:

#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

eb_bitmap_offset() maps (bitmap item offset, bit number) to a page index and an offset within that page, accounting for the extent buffer's own offset into its first page:

static inline void eb_bitmap_offset(struct extent_buffer *eb,
				    unsigned long start, unsigned long nr,
				    unsigned long *page_index,
				    size_t *page_offset)
{
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	size_t byte_offset = BIT_BYTE(nr);
	size_t offset;

	/*
	 * The byte we want is the offset of the extent buffer + the offset of
	 * the bitmap item in the extent buffer + the offset of the byte in
	 * the bitmap item.
	 */
	offset = start_offset + start + byte_offset;

	*page_index = offset >> PAGE_CACHE_SHIFT;
	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
}

extent_buffer_test_bit() uses it to read a single bit:

int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
			   unsigned long nr)
{
	char *kaddr;
	struct page *page;
	unsigned long i;
	size_t offset;

	eb_bitmap_offset(eb, start, nr, &i, &offset);
	page = eb->pages[i];
	WARN_ON(!PageUptodate(page));
	kaddr = page_address(page);
	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
}

extent_buffer_bitmap_set() and extent_buffer_bitmap_clear() set or clear a run of len bits starting at bit pos of the bitmap item: they build a first-byte mask with BITMAP_FIRST_BYTE_MASK(pos), walk whole bytes (advancing to the next page and re-checking PageUptodate when ++offset reaches PAGE_CACHE_SIZE with bits still left), and finish the partial last byte with BITMAP_LAST_BYTE_MASK(pos + len); set ORs the masks in, clear ANDs their complements out.
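The set/clear walk is the familiar bitmap_set()/bitmap_clear() algorithm reduced to byte granularity. Below is a self-contained userspace rendering of it over a flat buffer (no pages, no extent buffer) using the same masks; it is a sketch for illustration, with hypothetical helper names, not the kernel code.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

static int test_bit(const unsigned char *map, unsigned long nr)
{
	return 1U & (map[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE - 1)));
}

static void bitmap_set_bytewise(unsigned char *map, unsigned long pos,
				unsigned long len)
{
	unsigned long offset = BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_set) {		/* whole bytes */
		map[offset++] |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = BYTE_MASK;
	}
	if (len) {				/* partial last byte */
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		map[offset] |= mask_to_set;
	}
}

static void bitmap_clear_bytewise(unsigned char *map, unsigned long pos,
				  unsigned long len)
{
	unsigned long offset = BIT_BYTE(pos);
	const unsigned int size = pos + len;
	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);

	while (len >= bits_to_clear) {
		map[offset++] &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_BYTE;
		mask_to_clear = BYTE_MASK;
	}
	if (len) {
		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
		map[offset] &= ~mask_to_clear;
	}
}

int main(void)
{
	unsigned char map[4];

	memset(map, 0, sizeof(map));
	bitmap_set_bytewise(map, 3, 17);	/* set bits 3..19 */
	assert(!test_bit(map, 2) && test_bit(map, 3));
	assert(test_bit(map, 19) && !test_bit(map, 20));
	bitmap_clear_bytewise(map, 8, 8);	/* clear bits 8..15 */
	assert(test_bit(map, 7) && !test_bit(map, 8));
	assert(!test_bit(map, 15) && test_bit(map, 16));
	printf("byte-granularity bitmap ops behave as expected\n");
	return 0;
}

The kernel versions interleave the same loop with page hops via eb->pages[] and eb_bitmap_offset(); only the addressing differs, not the mask arithmetic.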