fs/btrfs/disk-io.c +6 −1

@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
 			return ret;
+	printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
 	num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 				      eb->start, eb->len);
 	if (num_copies == 1)
@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
+		printk("bad tree block start %llu %llu\n",
+		       (unsigned long long)found_start,
+		       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}
@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 	if (ret == 0) {
 		buf->flags |= EXTENT_UPTODATE;
+	} else {
+		WARN_ON(1);
 	}
 	return buf;
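Note on the disk-io.c hunks: they are purely diagnostic. The read path already rejects a tree block whose header bytenr does not match the logical start it was read from; the patch makes those failures visible with a printk and a WARN_ON instead of a silent -EIO. A minimal standalone userspace sketch of the same check-and-report pattern (struct block_header and check_block_start are made-up names for illustration, not btrfs code):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Pretend tree-block header: records the byte offset it was written at. */
struct block_header {
	uint64_t bytenr;
};

/* Return 0 if the header matches where the block was actually read from,
 * otherwise report the mismatch and return -EIO, like the patched hook. */
static int check_block_start(const struct block_header *hdr, uint64_t start)
{
	if (hdr->bytenr != start) {
		fprintf(stderr, "bad tree block start %llu %llu\n",
			(unsigned long long)hdr->bytenr,
			(unsigned long long)start);
		return -EIO;
	}
	return 0;
}

int main(void)
{
	struct block_header good = { .bytenr = 4096 };
	struct block_header bad  = { .bytenr = 8192 };

	printf("good block: %d\n", check_block_start(&good, 4096)); /* 0 */
	printf("bad block:  %d\n", check_block_start(&bad, 4096));  /* -EIO */
	return 0;
}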
fs/btrfs/extent_io.c +14 −7

@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur);
 		}
 		/* the get_extent function already copied into the page */
 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;
@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * properly set.  releasepage may drop page->private
 		 * on us if the page isn't already dirty.
 		 */
+		lock_page(page);
 		if (i == 0) {
-			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
-			lock_page(page);
 			set_page_extent_mapped(page);
-			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		if (i == 0)
-			unlock_page(page);
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE -1,
+				 GFP_NOFS);
+		unlock_page(page);
 	}
-	return set_extent_dirty(tree, eb->start,
-				eb->start + eb->len - 1, GFP_NOFS);
+	return 0;
 }
 EXPORT_SYMBOL(set_extent_buffer_dirty);
@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (all_uptodate) {
 		if (start_i == 0)
 			eb->flags |= EXTENT_UPTODATE;
+		if (ret) {
+			printk("all up to date but ret is %d\n", ret);
+		}
 		goto unlock_exit;
 	}
@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 						      mirror_num);
 			if (err) {
 				ret = err;
+				printk("err %d from __extent_read_full_page\n", ret);
 			}
 		} else {
 			unlock_page(page);
@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		submit_one_bio(READ, bio, mirror_num);

 	if (ret || !wait) {
+		if (ret)
+			printk("ret %d wait %d returning\n", ret, wait);
 		return ret;
 	}
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
+			printk("page not uptodate after wait_on_page_locked\n");
 			ret = -EIO;
 		}
 	}
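Note on the set_extent_buffer_dirty() rework: each page is now locked once around the whole per-page update, and each page's range is marked dirty individually, instead of locking only the first page and dirtying the whole buffer range in a single call at the end. A toy userspace sketch of that lock-around-the-whole-update shape, assuming pthreads (toy_page and mark_page_dirty are invented names, not the kernel API; build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Toy "page": some state plus the lock that protects it. */
struct toy_page {
	pthread_mutex_t lock;
	int dirty;
	int private_set;
};

/* Mirror of the reworked loop body: take the page lock once, do every
 * update that depends on page state under it, then drop it. */
static void mark_page_dirty(struct toy_page *p, int is_first)
{
	pthread_mutex_lock(&p->lock);
	if (is_first)
		p->private_set = 1;	/* stand-in for set_page_extent_head() */
	p->dirty = 1;			/* stand-in for __set_page_dirty_nobuffers() */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct toy_page pages[3] = {
		{ PTHREAD_MUTEX_INITIALIZER, 0, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 0, 0 },
		{ PTHREAD_MUTEX_INITIALIZER, 0, 0 },
	};
	for (int i = 0; i < 3; i++)
		mark_page_dirty(&pages[i], i == 0);
	for (int i = 0; i < 3; i++)
		printf("page %d: dirty=%d private=%d\n", i,
		       pages[i].dirty, pages[i].private_set);
	return 0;
}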
fs/btrfs/file.c +8 −7

@@ -39,7 +39,8 @@
 #include "compat.h"

-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
+static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
+				int write_bytes,
 			 struct page **prepared_pages,
 			 const char __user * buf)
 {
@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
 	return page_fault ? -EFAULT : 0;
 }

-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
 	for (i = 0; i < num_pages; i++) {
@@ -359,7 +360,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	return err;
 }

-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;
@@ -515,7 +516,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
  * it is either truncated or split.  Anything entirely inside the range
  * is deleted from the tree.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
 {
@@ -785,7 +786,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 /*
  * this gets pages into the page cache and locks them down
  */
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
 			 struct page **pages, size_t num_pages,
 			 loff_t pos, unsigned long first_index,
 			 unsigned long last_index, size_t write_bytes)
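Note on the file.c hunks: every change here just adds noinline to a helper's definition. In the kernel that macro expands to the compiler attribute forbidding inlining, which keeps each helper as its own stack frame so it shows up by name in backtraces and stack-usage reports. A minimal userspace equivalent using the GCC/Clang attribute directly (copy_step is a made-up example function, not from the patch):

#include <stdio.h>

/* Userspace analogue of the kernel's noinline annotation: ask the
 * compiler to keep this helper as a real call so it appears by name
 * in backtraces and stack-depth measurements. */
static __attribute__((noinline)) int copy_step(int acc, int byte)
{
	return acc + byte;
}

int main(void)
{
	int sum = 0;
	for (int i = 0; i < 8; i++)
		sum = copy_step(sum, i);
	printf("sum = %d\n", sum);	/* prints 28 */
	return 0;
}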
fs/btrfs/volumes.c +21 −19

@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void)
 	return 0;
 }

-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
-					  u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+						   u64 devid, u8 *uuid)
 {
 	struct btrfs_device *dev;
 	struct list_head *cur;
@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
 	return NULL;
 }

-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
 	struct list_head *cur;
 	struct btrfs_fs_devices *fs_devices;
@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
  * the list if the block device is congested.  This way, multiple devices
  * can make progress from a single worker thread.
  */
-int run_scheduled_bios(struct btrfs_device *device)
+static int noinline run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;
@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work)
 	run_scheduled_bios(device);
 }

-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {
@@ -480,7 +480,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder,
  * called very infrequently and that a given device has a small number
  * of extents
 */
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_device *device,
 				struct btrfs_path *path,
 				u64 num_bytes, u64 *start)
@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 			   struct btrfs_device *device,
 			   u64 chunk_tree, u64 chunk_objectid,
 			   u64 chunk_offset,
@@ -693,7 +693,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+				    u64 objectid, u64 *offset)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -735,8 +736,8 @@ static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
 	return ret;
 }

-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
-			   u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+				    struct btrfs_path *path, u64 *objectid)
 {
 	int ret;
 	struct btrfs_key key;
@@ -1103,7 +1104,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	goto out;
 }

-int btrfs_update_device(struct btrfs_trans_handle *trans,
+int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
 			struct btrfs_device *device)
 {
 	int ret;
@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }

-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
-			       int sub_stripes)
+static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
 		return calc_size;
@@ -2141,7 +2142,8 @@ struct async_sched {
  * This will add one bio to the pending list for a device and make sure
  * the work struct is scheduled.
  */
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
+static int noinline schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
 		 int rw, struct bio *bio)
 {
 	int should_queue = 1;
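Note on the volumes.c hunks: these likewise add noinline, and make run_scheduled_bios()/schedule_bio() static since they are only used within this file. The comment kept in the last hunk describes the mechanism: bios are appended to a per-device pending list and a single worker is scheduled to drain it. A toy, single-threaded sketch of that queue-then-schedule-once shape (toy_* names are invented for illustration; the real code uses struct btrfs_work and bio lists, and the locking is omitted here):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the schedule_bio() idea: queue work on a per-device list
 * and only "schedule" the worker when it is not already pending, so one
 * worker run drains many queued items. */
struct toy_bio {
	int sector;
	struct toy_bio *next;
};

struct toy_device {
	struct toy_bio *pending;
	struct toy_bio *tail;
	int worker_scheduled;
};

static void toy_schedule_bio(struct toy_device *dev, struct toy_bio *bio)
{
	int should_queue = !dev->worker_scheduled;

	/* append to the per-device pending list */
	bio->next = NULL;
	if (dev->tail)
		dev->tail->next = bio;
	else
		dev->pending = bio;
	dev->tail = bio;

	/* schedule the worker only if it was not already pending */
	if (should_queue) {
		dev->worker_scheduled = 1;
		printf("worker scheduled\n");
	}
}

static void toy_run_scheduled_bios(struct toy_device *dev)
{
	struct toy_bio *bio = dev->pending;

	/* grab the whole list, then submit everything on it */
	dev->pending = dev->tail = NULL;
	dev->worker_scheduled = 0;
	while (bio) {
		struct toy_bio *next = bio->next;
		printf("submitting bio for sector %d\n", bio->sector);
		free(bio);
		bio = next;
	}
}

int main(void)
{
	struct toy_device dev = { NULL, NULL, 0 };

	for (int i = 0; i < 3; i++) {
		struct toy_bio *bio = malloc(sizeof(*bio));
		bio->sector = i * 8;
		toy_schedule_bio(&dev, bio);	/* schedules the worker only once */
	}
	toy_run_scheduled_bios(&dev);		/* drains all three bios */
	return 0;
}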