
Commit 484b267c authored by Jaegeuk Kim, committed by Jaegeuk Kim

f2fs: refactor extent_cache to support for read and more



This patch prepares extent_cache to be ready for addition.

Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent de9b2235
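
In short, the patch renames the existing extent-cache API (f2fs_lookup_extent_cache() and friends) to read-specific names, introduces an extent_type enum with EX_READ as the only member for now, and turns the per-sb extent-cache state and its statistics into arrays indexed by that type. A minimal userspace sketch of the indexing pattern follows; the enum mirrors the f2fs.h hunk below, while demo_sb_info, demo_stat_inc_total_hit() and main() are purely illustrative stand-ins for the kernel structures and stat macros.

#include <stdio.h>

/* extent cache type, as added to f2fs.h by this patch */
enum extent_type {
	EX_READ,
	NR_EXTENT_CACHES,
};

/* illustrative stand-in: the real counters live in struct f2fs_sb_info
 * as atomic64_t arrays indexed by extent_type */
struct demo_sb_info {
	long long total_hit_ext[NR_EXTENT_CACHES];
	long long read_hit_rbtree[NR_EXTENT_CACHES];
	long long read_hit_cached[NR_EXTENT_CACHES];
};

/* the stat helpers now take the cache type, cf. stat_inc_total_hit() */
static void demo_stat_inc_total_hit(struct demo_sb_info *sbi,
				    enum extent_type type)
{
	sbi->total_hit_ext[type]++;
}

int main(void)
{
	struct demo_sb_info sbi = {0};

	demo_stat_inc_total_hit(&sbi, EX_READ);
	printf("EX_READ lookups: %lld\n", sbi.total_hit_ext[EX_READ]);
	return 0;
}

Adding a second cache type later then only needs a new enum entry before NR_EXTENT_CACHES; every array and loop below picks it up automatically.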
fs/f2fs/data.c  +10 −10
@@ -1088,7 +1088,7 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	f2fs_set_data_blkaddr(dn);
-	f2fs_update_extent_cache(dn);
+	f2fs_update_read_extent_cache(dn);
}

/* dn->ofs_in_node will be returned with up-to-date last block pointer */
@@ -1157,7 +1157,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
	struct extent_info ei = {0, };
	struct inode *inode = dn->inode;

-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}
@@ -1179,7 +1179,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
	if (!page)
		return ERR_PTR(-ENOMEM);

-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ)) {
@@ -1452,7 +1452,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	pgofs =	(pgoff_t)map->m_lblk;
	end = pgofs + maxblocks;

-	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+	if (!create && f2fs_lookup_read_extent_cache(inode, pgofs, &ei)) {
		if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;
@@ -1662,7 +1662,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

-			f2fs_update_extent_cache_range(&dn,
+			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
@@ -1707,7 +1707,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
		if (map->m_flags & F2FS_MAP_MAPPED) {
			unsigned int ofs = start_pgofs - map->m_lblk;

-			f2fs_update_extent_cache_range(&dn,
+			f2fs_update_read_extent_cache_range(&dn,
				start_pgofs, map->m_pblk + ofs,
				map->m_len - ofs);
		}
@@ -2222,7 +2222,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
	if (f2fs_cluster_is_empty(cc))
		goto out;

-	if (f2fs_lookup_extent_cache(inode, start_idx, &ei))
+	if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
		from_dnode = false;

	if (!from_dnode)
@@ -2687,7 +2687,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
		set_new_dnode(&dn, inode, NULL, NULL, 0);

	if (need_inplace_update(fio) &&
-			f2fs_lookup_extent_cache(inode, page->index, &ei)) {
+	    f2fs_lookup_read_extent_cache(inode, page->index, &ei)) {
		fio->old_blkaddr = ei.blk + page->index - ei.fofs;

		if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
@@ -3413,7 +3413,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
-		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+		if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
@@ -3454,7 +3454,7 @@ static int __find_data_block(struct inode *inode, pgoff_t index,

	set_new_dnode(&dn, inode, ipage, ipage, 0);

-	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
	} else {
		/* hole case */
fs/f2fs/debug.c  +44 −21
@@ -72,15 +72,23 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	si->main_area_zones = si->main_area_sections /
				le32_to_cpu(raw_super->secs_per_zone);

-	/* validation check of the segment numbers */
+	/* general extent cache stats */
+	for (i = 0; i < NR_EXTENT_CACHES; i++) {
+		struct extent_tree_info *eti = &sbi->extent_tree[i];
+
+		si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]);
+		si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]);
+		si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]);
+		si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i];
+		si->ext_tree[i] = atomic_read(&eti->total_ext_tree);
+		si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree);
+		si->ext_node[i] = atomic_read(&eti->total_ext_node);
+	}
+	/* read extent_cache only */
	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
-	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
-	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
-	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
-	si->total_ext = atomic64_read(&sbi->total_hit_ext);
-	si->ext_tree = atomic_read(&sbi->total_ext_tree);
-	si->zombie_tree = atomic_read(&sbi->total_zombie_tree);
-	si->ext_node = atomic_read(&sbi->total_ext_node);
+	si->hit_total[EX_READ] += si->hit_largest;

+	/* validation check of the segment numbers */
	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
@@ -294,10 +302,16 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
				sizeof(struct nat_entry_set);
	for (i = 0; i < MAX_INO_ENTRY; i++)
		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);
-	si->cache_mem += atomic_read(&sbi->total_ext_tree) *
+
+	for (i = 0; i < NR_EXTENT_CACHES; i++) {
+		struct extent_tree_info *eti = &sbi->extent_tree[i];
+
+		si->ext_mem[i] = atomic_read(&eti->total_ext_tree) *
						sizeof(struct extent_tree);
-	si->cache_mem += atomic_read(&sbi->total_ext_node) *
+		si->ext_mem[i] += atomic_read(&eti->total_ext_node) *
						sizeof(struct extent_node);
+		si->cache_mem += si->ext_mem[i];
+	}

	si->page_mem = 0;
	if (sbi->node_inode) {
@@ -490,16 +504,18 @@ static int stat_show(struct seq_file *s, void *v)
				si->bg_node_blks);
		seq_printf(s, "BG skip : IO: %u, Other: %u\n",
				si->io_skip_bggc, si->other_skip_bggc);
		seq_puts(s, "\nExtent Cache:\n");
		seq_puts(s, "\nExtent Cache (Read):\n");
		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
-				si->hit_largest, si->hit_cached,
-				si->hit_rbtree);
+				si->hit_largest, si->hit_cached[EX_READ],
+				si->hit_rbtree[EX_READ]);
		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
-				!si->total_ext ? 0 :
-				div64_u64(si->hit_total * 100, si->total_ext),
-				si->hit_total, si->total_ext);
+				!si->total_ext[EX_READ] ? 0 :
+				div64_u64(si->hit_total[EX_READ] * 100,
+				si->total_ext[EX_READ]),
+				si->hit_total[EX_READ], si->total_ext[EX_READ]);
		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
-				si->ext_tree, si->zombie_tree, si->ext_node);
+				si->ext_tree[EX_READ], si->zombie_tree[EX_READ],
+				si->ext_node[EX_READ]);
		seq_puts(s, "\nBalancing F2FS Async:\n");
		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
			   si->nr_dio_read, si->nr_dio_write);
@@ -566,8 +582,10 @@ static int stat_show(struct seq_file *s, void *v)
			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
		seq_printf(s, "  - static: %llu KB\n",
				si->base_mem >> 10);
		seq_printf(s, "  - cached: %llu KB\n",
		seq_printf(s, "  - cached all: %llu KB\n",
				si->cache_mem >> 10);
		seq_printf(s, "  - read extent cache: %llu KB\n",
				si->ext_mem[EX_READ] >> 10);
		seq_printf(s, "  - paged : %llu KB\n",
				si->page_mem >> 10);
	}
@@ -600,10 +618,15 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
	si->sbi = sbi;
	sbi->stat_info = si;

-	atomic64_set(&sbi->total_hit_ext, 0);
-	atomic64_set(&sbi->read_hit_rbtree, 0);
+	/* general extent cache stats */
+	for (i = 0; i < NR_EXTENT_CACHES; i++) {
+		atomic64_set(&sbi->total_hit_ext[i], 0);
+		atomic64_set(&sbi->read_hit_rbtree[i], 0);
+		atomic64_set(&sbi->read_hit_cached[i], 0);
+	}
+
+	/* read extent_cache only */
	atomic64_set(&sbi->read_hit_largest, 0);
-	atomic64_set(&sbi->read_hit_cached, 0);

	atomic_set(&sbi->inline_xattr, 0);
	atomic_set(&sbi->inline_inode, 0);
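
The stat_show() hunk above keeps the debugfs output format while switching to the per-type counters; note that hit_total[EX_READ] also folds in the largest-extent hits, which only the read cache tracks (see update_general_status()). Below is a small standalone sketch of the same arithmetic, with made-up counter values and a plain 64-bit division standing in for the kernel's div64_u64():

#include <stdio.h>

/* userspace stand-in for the kernel's div64_u64(); plain '/' suffices here */
static unsigned long long div64_u64(unsigned long long dividend,
				    unsigned long long divisor)
{
	return dividend / divisor;
}

int main(void)
{
	/* hypothetical counter values */
	unsigned long long hit_largest = 700, hit_cached = 200, hit_rbtree = 100;
	unsigned long long total_ext = 1250;

	/* hit_total is cached + rbtree hits, plus the largest-extent hits
	 * that only the read cache tracks */
	unsigned long long hit_total = hit_cached + hit_rbtree + hit_largest;

	/* same guarded division as the "Hit Ratio" line in stat_show() */
	printf("  - Hit Ratio: %llu%% (%llu / %llu)\n",
	       !total_ext ? 0ULL : div64_u64(hit_total * 100, total_ext),
	       hit_total, total_ext);
	return 0;	/* prints:   - Hit Ratio: 80% (1000 / 1250) */
}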
fs/f2fs/extent_cache.c  +276 −187

File changed. Preview size limit exceeded, changes collapsed.

fs/f2fs/f2fs.h  +77 −42
@@ -592,16 +592,22 @@ enum {
/* dirty segments threshold for triggering CP */
#define DEFAULT_DIRTY_THRESHOLD		4

+#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_PAGES
+#define RECOVERY_MIN_RA_BLOCKS		1
+
+#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
+
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
-#define EXTENT_CACHE_SHRINK_NUMBER	128
+#define READ_EXTENT_CACHE_SHRINK_NUMBER	128

-#define RECOVERY_MAX_RA_BLOCKS		BIO_MAX_PAGES
-#define RECOVERY_MIN_RA_BLOCKS		1
-
-#define F2FS_ONSTACK_PAGES	16	/* nr of onstack pages */
+/* extent cache type */
+enum extent_type {
+	EX_READ,
+	NR_EXTENT_CACHES,
+};

struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
@@ -617,11 +623,18 @@ struct rb_entry {
struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
-	block_t blk;			/* start block address of the extent */
+	union {
+		/* read extent_cache */
+		struct {
+			/* start block address of the extent */
+			block_t blk;
#ifdef CONFIG_F2FS_FS_COMPRESSION
-	unsigned int c_len;		/* physical extent length of compressed blocks */
+			/* physical extent length of compressed blocks */
+			unsigned int c_len;
#endif
+		};
+	};
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
@@ -632,13 +645,25 @@ struct extent_node {

struct extent_tree {
	nid_t ino;			/* inode number */
+	enum extent_type type;		/* keep the extent tree type */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
-	struct extent_info largest;	/* largested extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree*/
	bool largest_updated;		/* largest extent updated */
+	struct extent_info largest;	/* largest cached extent for EX_READ */
};
+
+struct extent_tree_info {
+	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
+	struct mutex extent_tree_lock;	/* locking extent radix tree */
+	struct list_head extent_list;		/* lru list for shrinker */
+	spinlock_t extent_lock;			/* locking extent lru list */
+	atomic_t total_ext_tree;		/* extent tree count */
+	struct list_head zombie_list;		/* extent zombie tree list */
+	atomic_t total_zombie_tree;		/* extent zombie tree count */
+	atomic_t total_ext_node;		/* extent info count */
+};

/*
@@ -801,7 +826,8 @@ struct f2fs_inode_info {
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct task_struct *atomic_write_task;	/* store atomic write task */
-	struct extent_tree *extent_tree;	/* cached extent_tree entry */
+	struct extent_tree *extent_tree[NR_EXTENT_CACHES];
+					/* cached extent_tree entry */
	pgoff_t ra_offset;		/* ongoing readahead offset */
	struct inode *cow_inode;	/* copy-on-write inode for atomic write */

@@ -1625,14 +1651,7 @@ struct f2fs_sb_info {
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
-	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
-	struct mutex extent_tree_lock;	/* locking extent radix tree */
-	struct list_head extent_list;		/* lru list for shrinker */
-	spinlock_t extent_lock;			/* locking extent lru list */
-	atomic_t total_ext_tree;		/* extent tree count */
-	struct list_head zombie_list;		/* extent zombie tree list */
-	atomic_t total_zombie_tree;		/* extent zombie tree count */
-	atomic_t total_ext_node;		/* extent info count */
+	struct extent_tree_info extent_tree[NR_EXTENT_CACHES];

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
@@ -1717,10 +1736,14 @@ struct f2fs_sb_info {
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;		/* # of inplace update */
-	atomic64_t total_hit_ext;		/* # of lookup extent cache */
-	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
-	atomic64_t read_hit_largest;		/* # of hit largest extent node */
-	atomic64_t read_hit_cached;		/* # of hit cached extent node */
+	/* # of lookup extent cache */
+	atomic64_t total_hit_ext[NR_EXTENT_CACHES];
+	/* # of hit rbtree extent node */
+	atomic64_t read_hit_rbtree[NR_EXTENT_CACHES];
+	/* # of hit cached extent node */
+	atomic64_t read_hit_cached[NR_EXTENT_CACHES];
+	/* # of hit largest extent node in read extent cache */
+	atomic64_t read_hit_largest;
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
@@ -3867,9 +3890,17 @@ struct f2fs_stat_info {
	struct f2fs_sb_info *sbi;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
-	unsigned long long hit_largest, hit_cached, hit_rbtree;
-	unsigned long long hit_total, total_ext;
-	int ext_tree, zombie_tree, ext_node;
+	unsigned long long hit_cached[NR_EXTENT_CACHES];
+	unsigned long long hit_rbtree[NR_EXTENT_CACHES];
+	unsigned long long total_ext[NR_EXTENT_CACHES];
+	unsigned long long hit_total[NR_EXTENT_CACHES];
+	int ext_tree[NR_EXTENT_CACHES];
+	int zombie_tree[NR_EXTENT_CACHES];
+	int ext_node[NR_EXTENT_CACHES];
+	/* to count memory footprint */
+	unsigned long long ext_mem[NR_EXTENT_CACHES];
+	/* for read extent cache */
+	unsigned long long hit_largest;
	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
	int ndirty_data, ndirty_qdata;
	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
@@ -3928,10 +3959,10 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
#define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
#define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
-#define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
-#define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
+#define stat_inc_total_hit(sbi, type)		(atomic64_inc(&(sbi)->total_hit_ext[type]))
+#define stat_inc_rbtree_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_rbtree[type]))
#define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
-#define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
+#define stat_inc_cached_node_hit(sbi, type)	(atomic64_inc(&(sbi)->read_hit_cached[type]))
#define stat_inc_inline_xattr(inode)					\
	do {								\
		if (f2fs_has_inline_xattr(inode))			\
@@ -4054,10 +4085,10 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
-#define stat_inc_total_hit(sbi)				do { } while (0)
-#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
+#define stat_inc_total_hit(sbi, type)			do { } while (0)
+#define stat_inc_rbtree_node_hit(sbi, type)		do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
-#define stat_inc_cached_node_hit(sbi)			do { } while (0)
+#define stat_inc_cached_node_hit(sbi, type)		do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
@@ -4163,20 +4194,23 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root, bool check_key);
-unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
void f2fs_drop_extent_tree(struct inode *inode);
-unsigned int f2fs_destroy_extent_node(struct inode *inode);
+void f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
-bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
-			struct extent_info *ei);
-void f2fs_update_extent_cache(struct dnode_of_data *dn);
-void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
-			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

+/* read extent cache ops */
+bool f2fs_lookup_read_extent_cache(struct inode *inode, pgoff_t pgofs,
+			struct extent_info *ei);
+void f2fs_update_read_extent_cache(struct dnode_of_data *dn);
+void f2fs_update_read_extent_cache_range(struct dnode_of_data *dn,
+			pgoff_t fofs, block_t blkaddr, unsigned int len);
+unsigned int f2fs_shrink_read_extent_tree(struct f2fs_sb_info *sbi,
+			int nr_shrink);

/*
 * sysfs.c
 */
@@ -4243,9 +4277,9 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
-void f2fs_update_extent_tree_range_compressed(struct inode *inode,
-				pgoff_t fofs, block_t blkaddr, unsigned int llen,
-				unsigned int c_len);
+void f2fs_update_read_extent_tree_range_compressed(struct inode *inode,
+				pgoff_t fofs, block_t blkaddr,
+				unsigned int llen, unsigned int c_len);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
@@ -4326,9 +4360,10 @@ static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi,
static inline void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi,
							nid_t ino) { }
#define inc_compr_inode_stat(inode)		do { } while (0)
-static inline void f2fs_update_extent_tree_range_compressed(struct inode *inode,
-				pgoff_t fofs, block_t blkaddr, unsigned int llen,
-				unsigned int c_len) { }
+static inline void f2fs_update_read_extent_tree_range_compressed(
+				struct inode *inode,
+				pgoff_t fofs, block_t blkaddr,
+				unsigned int llen, unsigned int c_len) { }
#endif

static inline int set_compress_context(struct inode *inode)
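
The f2fs.h hunks above carry the structural core of the refactor: extent_info keeps the common fofs/len pair and moves the read-only payload (blk, and c_len under compression) into an anonymous union, so a future cache type can add its own payload without bloating every entry. Below is a userspace mock of that layout together with the offset-to-block translation the data.c callers perform; block_t here and the values in main() are illustrative stand-ins, only the struct shape comes from the hunk above.

#include <stdio.h>

typedef unsigned int block_t;	/* stand-in for the kernel typedef */

/* mirrors the reworked struct extent_info from the hunk above */
struct extent_info {
	unsigned int fofs;	/* start offset in a file */
	unsigned int len;	/* length of the extent */
	union {
		struct {	/* read extent_cache payload */
			block_t blk;
		};
	};
};

int main(void)
{
	struct extent_info ei = { .fofs = 100, .len = 8, .blk = 5000 };
	unsigned int index = 103;	/* page index inside the extent */

	/* the pattern used at every f2fs_lookup_read_extent_cache() call
	 * site: translate a file offset within the cached extent to its
	 * on-disk block address */
	if (index >= ei.fofs && index < ei.fofs + ei.len)
		printf("data_blkaddr = %u\n", ei.blk + index - ei.fofs);
	return 0;	/* prints: data_blkaddr = 5003 */
}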
fs/f2fs/file.c  +4 −4
@@ -637,7 +637,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
-		f2fs_update_extent_cache_range(dn, fofs, 0, len);
+		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;
@@ -1473,7 +1473,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
		f2fs_set_data_blkaddr(dn);
	}

-	f2fs_update_extent_cache_range(dn, start, 0, index - start);
+	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
@@ -2595,7 +2595,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE,
					.m_may_create = false };
-	struct extent_info ei = {0, 0, 0};
+	struct extent_info ei = {0, };
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
@@ -2627,7 +2627,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
-	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
+	if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}