
Commit 0fcb9d21 authored by Linus Torvalds
Pull f2fs updates from Jaegeuk Kim:
 "Most part of the patches include enhancing the stability and
  performance of in-memory extent caches feature.

  In addition, it introduces several new features and configurable
  points:
   - F2FS_GOING_DOWN_METAFLUSH ioctl to test power failures
   - F2FS_IOC_WRITE_CHECKPOINT ioctl to trigger checkpoint by users
   - background_gc=sync mount option to run GC synchronously
   - periodic checkpoints
   - sysfs entry to control readahead blocks for free nids

  And the following bug fixes have been merged.
   - fix SSA corruption by collapse/insert_range
   - correct a couple of gc behaviors
   - fix the results of f2fs_map_blocks
   - fix error case handling of volatile/atomic writes"
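Both new ioctls go through the normal ioctl(2) path on any file descriptor inside the filesystem. A minimal userspace sketch follows; the constant values are copied by hand from fs/f2fs/f2fs.h of this cycle (there was no exported uapi header yet), so verify them against your tree before relying on them:

	#include <fcntl.h>
	#include <linux/types.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Copied from fs/f2fs/f2fs.h (no uapi header at this point). */
	#define F2FS_IOCTL_MAGIC		0xf5
	#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
	#define F2FS_IOC_SHUTDOWN		_IOR('X', 125, __u32) /* matches XFS_IOC_GOINGDOWN */
	#define F2FS_GOING_DOWN_METAFLUSH	0x3 /* flush meta pages, then go down */

	int main(int argc, char **argv)
	{
		__u32 how = F2FS_GOING_DOWN_METAFLUSH;
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY); /* any file/dir on the f2fs mount */
		if (fd < 0)
			return 1;

		/* Trigger a checkpoint on demand (needs root). */
		if (ioctl(fd, F2FS_IOC_WRITE_CHECKPOINT) < 0)
			perror("F2FS_IOC_WRITE_CHECKPOINT");

		/* Simulate a power cut right after the meta flush -- test rigs only. */
		if (ioctl(fd, F2FS_IOC_SHUTDOWN, &how) < 0)
			perror("F2FS_IOC_SHUTDOWN");

		close(fd);
		return 0;
	}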

* tag 'for-f2fs-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (54 commits)
  f2fs: fix to skip shrinking extent nodes
  f2fs: fix error path of ->symlink
  f2fs: fix to clear GCed flag for atomic written page
  f2fs: don't need to submit bio on error case
  f2fs: fix leakage of inmemory atomic pages
  f2fs: refactor __find_rev_next_{zero}_bit
  f2fs: support fiemap for inline_data
  f2fs: flush dirty data for bmap
  f2fs: relocate the tracepoint for background_gc
  f2fs crypto: fix racing of accessing encrypted page among
  f2fs: export ra_nid_pages to sysfs
  f2fs: readahead for free nids building
  f2fs: support lower priority asynchronous readahead in ra_meta_pages
  f2fs: don't tag REQ_META for temporary non-meta pages
  f2fs: add a tracepoint for f2fs_read_data_pages
  f2fs: set GFP_NOFS for grab_cache_page
  f2fs: fix SSA updates resulting in corruption
  Revert "f2fs: do not skip dentry block writes"
  f2fs: add F2FS_GOING_DOWN_METAFLUSH to test power-failure
  f2fs: merge meta writes as many possible
  ...
parents d000f8d6 beaa57dd
Documentation/ABI/testing/sysfs-fs-f2fs  +12 −0
@@ -80,3 +80,15 @@ Date: February 2015
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
		 Controls the trimming rate in batch mode.

What:		/sys/fs/f2fs/<disk>/cp_interval
Date:		October 2015
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
		 Controls the checkpoint timing.

What:		/sys/fs/f2fs/<disk>/ra_nid_pages
Date:		October 2015
Contact:	"Chao Yu" <chao2.yu@samsung.com>
Description:
		 Controls the count of nid pages to be read ahead.
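Both new attributes are plain decimal sysfs files, so tuning them needs nothing more than a write. A hedged sketch; the device component of the path ("sda1" here) depends on which block device backs the mount, and cp_interval is in seconds (the checkpoint.c hunk further down arms the timer at jiffies + HZ * cp_interval):

	#include <stdio.h>

	/* Sketch: write one decimal value to an f2fs sysfs attribute. */
	static int f2fs_set(const char *attr, long val)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path), "/sys/fs/f2fs/sda1/%s", attr);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%ld\n", val);
		return fclose(f);
	}

	int main(void)
	{
		f2fs_set("cp_interval", 300);	/* checkpoint at least every 5 minutes */
		f2fs_set("ra_nid_pages", 4);	/* pages to read ahead when building free nids */
		return 0;
	}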
Documentation/filesystems/f2fs.txt  +2 −1
@@ -102,7 +102,8 @@ background_gc=%s Turn on/off cleaning operations, namely garbage
                       collection, triggered in background when I/O subsystem is
                       idle. If background_gc=on, it will turn on the garbage
                       collection and if background_gc=off, garbage collection
                       will be turned off.
                       will be turned off. If background_gc=sync, it will turn
                       on synchronous garbage collection running in background.
                       Default value for this option is on. So garbage
                       collection is on by default.
disable_roll_forward   Disable the roll-forward recovery routine
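The new mode is selected like any other f2fs mount option; from a program it is just the data string passed to mount(2). A short sketch with hypothetical device and mountpoint names:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* /dev/sdb1 and /mnt/f2fs are placeholders. */
		if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, "background_gc=sync"))
			perror("mount");
		return 0;
	}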
fs/f2fs/checkpoint.c  +42 −7
@@ -47,7 +47,8 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
/*
 * We guarantee no failure on the returned page.
 */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
@@ -58,6 +59,9 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
		.blk_addr = index,
		.encrypted_page = NULL,
	};

	if (unlikely(!is_meta))
		fio.rw &= ~REQ_META;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
@@ -91,6 +95,17 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
	return page;
}

struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

/* for POR only */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
{
	switch (type) {
@@ -125,7 +140,8 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type)
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	block_t prev_blk_addr = 0;
	struct page *page;
@@ -133,10 +149,13 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO,
		.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
		.encrypted_page = NULL,
	};

	if (unlikely(type == META_POR))
		fio.rw &= ~REQ_META;

	for (; nrpages-- > 0; blkno++) {

		if (!is_valid_blkaddr(sbi, blkno, type))
@@ -196,7 +215,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
	f2fs_put_page(page, 0);

	if (readahead)
		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR);
		ra_meta_pages(sbi, index, MAX_BIO_BLOCKS(sbi), META_POR, true);
}

static int f2fs_write_meta_page(struct page *page,
@@ -257,7 +276,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = LONG_MAX;
	pgoff_t index = 0, end = LONG_MAX, prev = LONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	struct writeback_control wbc = {
@@ -277,6 +296,13 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == LONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
@@ -297,13 +323,14 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

stop:
	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, WRITE);

@@ -495,7 +522,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);
	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
@@ -1000,6 +1027,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)

	start_blk = __start_cp_addr(sbi);

	/* need to wait for end_io results */
	wait_on_all_pages_writeback(sbi);
	if (unlikely(f2fs_cp_error(sbi)))
		return;

	/* write out checkpoint buffer at block 0 */
	update_meta_page(sbi, ckpt, start_blk++);

@@ -1109,6 +1141,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	if (cpc->reason == CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);

	/* do checkpoint periodically */
	sbi->cp_expires = round_jiffies_up(jiffies + HZ * sbi->cp_interval);
out:
	mutex_unlock(&sbi->cp_mutex);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
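The prev bookkeeping added to sync_meta_pages() above is what the shortlog's "merge meta writes as many possible" refers to: when only a bounded number of meta pages has to go out (nr_to_write != LONG_MAX), writeback now stops at the first gap in the page indexes, so the pages already queued form one contiguous, mergeable bio. A standalone toy model of that early-stop rule, not kernel code:

	#include <stddef.h>

	/*
	 * Given sorted dirty-page indexes, take pages for writeback until either
	 * the budget runs out or the run stops being contiguous, mirroring the
	 * prev/page->index check added to sync_meta_pages(). (In the kernel the
	 * gap check only applies when nr_to_write is bounded.)
	 */
	static size_t take_contiguous(const long *idx, size_t n, long budget)
	{
		size_t taken = 0;
		long prev = 0;
		int prev_set = 0;

		for (size_t i = 0; i < n && (long)taken < budget; i++) {
			if (prev_set && idx[i] != prev + 1)
				break;		/* a gap would split the merged bio */
			prev = idx[i];
			prev_set = 1;
			taken++;
		}
		return taken;
	}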
fs/f2fs/data.c  +107 −69
@@ -275,7 +275,8 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
	return f2fs_reserve_block(dn, index);
}

struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
@@ -292,7 +293,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, int rw)
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = grab_cache_page(mapping, index);
	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

@@ -352,7 +353,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC);
	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

@@ -372,12 +373,13 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
 * Because, the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC);
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

@@ -411,7 +413,7 @@ struct page *get_new_data_page(struct inode *inode,
	struct dnode_of_data dn;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
@@ -439,7 +441,7 @@ struct page *get_new_data_page(struct inode *inode,
	} else {
		f2fs_put_page(page, 1);

		page = get_read_data_page(inode, index, READ_SYNC);
		page = get_read_data_page(inode, index, READ_SYNC, true);
		if (IS_ERR(page))
			goto repeat;

@@ -447,9 +449,9 @@ struct page *get_new_data_page(struct inode *inode,
		lock_page(page);
	}
got_it:
	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
	if (new_i_size && i_size_read(inode) <
				((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
@@ -489,8 +491,9 @@ static int __allocate_data_block(struct dnode_of_data *dn)
	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));

	/* direct IO doesn't use extent cache to maximize the performance */
	f2fs_drop_largest_extent(dn->inode, fofs);
@@ -523,6 +526,9 @@ static void __allocate_data_blocks(struct inode *inode, loff_t offset,
		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			if (unlikely(f2fs_cp_error(sbi)))
				goto sync_out;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
@@ -565,6 +571,7 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
@@ -595,40 +602,40 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR) {
		if (flag == F2FS_GET_BLOCK_BMAP) {
			err = -ENOENT;
			goto put_out;
		} else if (flag == F2FS_GET_BLOCK_READ ||
				flag == F2FS_GET_BLOCK_DIO) {

	if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto put_out;
			}
		/*
		 * if it is in fiemap call path (flag = F2FS_GET_BLOCK_FIEMAP),
		 * mark it as mapped and unwritten block.
		 */
	}

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
		if (dn.data_blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
	} else if (create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto put_out;
			allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
			map->m_flags = F2FS_MAP_NEW;
		} else {
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						dn.data_blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto put_out;
			}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
			/*
			 * preallocated unwritten block should be mapped
			 * for fiemap.
			 */
			if (dn.data_blkaddr == NEW_ADDR)
				map->m_flags = F2FS_MAP_UNWRITTEN;
		}
	}

	map->m_flags |= F2FS_MAP_MAPPED;
	map->m_pblk = dn.data_blkaddr;
	map->m_len = 1;

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	dn.ofs_in_node++;
	pgofs++;

@@ -647,23 +654,35 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			goto unlock_out;
		}

		if (dn.data_blkaddr == NEW_ADDR &&
				flag != F2FS_GET_BLOCK_FIEMAP)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {

		if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
			if (create) {
				if (unlikely(f2fs_cp_error(sbi))) {
					err = -EIO;
					goto sync_out;
				}
				err = __allocate_data_block(&dn);
				if (err)
					goto sync_out;
				allocated = true;
				map->m_flags |= F2FS_MAP_NEW;
				blkaddr = dn.data_blkaddr;
			} else {
				/*
				 * we only merge preallocated unwritten blocks
				 * for fiemap.
				 */
				if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR)
					goto sync_out;
			}
		}

		/* Give more consecutive addresses for the readahead */
		if ((map->m_pblk != NEW_ADDR &&
				blkaddr == (map->m_pblk + ofs)) ||
@@ -752,6 +771,12 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
@@ -903,7 +928,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
			if (f2fs_map_blocks(inode, &map, 0,
							F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
@@ -936,21 +962,14 @@ static int f2fs_mpage_readpages(struct address_space *mapping,

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {
				struct page *cpage;

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				cpage = find_lock_page(
						META_MAPPING(F2FS_I_SB(inode)),
						block_nr);
				if (cpage) {
					f2fs_wait_on_page_writeback(cpage,
									DATA);
					f2fs_put_page(cpage, 1);
				}
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
@@ -1012,6 +1031,9 @@ static int f2fs_read_data_pages(struct file *file,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
@@ -1041,6 +1063,11 @@ int do_write_data_page(struct f2fs_io_info *fio)
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->blk_addr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
@@ -1429,6 +1456,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,

	f2fs_wait_on_page_writeback(page, DATA);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
@@ -1551,10 +1582,16 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
	if (iov_iter_rw(iter) == WRITE) {
		__allocate_data_blocks(inode, offset, count);
		if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
			err = -EIO;
			goto out;
		}
	}

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
out:
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

@@ -1636,12 +1673,13 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

fs/f2fs/debug.c  +18 −18
@@ -33,11 +33,11 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	int i;

	/* validation check of the segment numbers */
	si->hit_largest = atomic_read(&sbi->read_hit_largest);
	si->hit_cached = atomic_read(&sbi->read_hit_cached);
	si->hit_rbtree = atomic_read(&sbi->read_hit_rbtree);
	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
	si->hit_cached = atomic64_read(&sbi->read_hit_cached);
	si->hit_rbtree = atomic64_read(&sbi->read_hit_rbtree);
	si->hit_total = si->hit_largest + si->hit_cached + si->hit_rbtree;
	si->total_ext = atomic_read(&sbi->total_hit_ext);
	si->total_ext = atomic64_read(&sbi->total_hit_ext);
	si->ext_tree = sbi->total_ext_tree;
	si->ext_node = atomic_read(&sbi->total_ext_node);
	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
@@ -118,7 +118,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
		}
	}
	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
	si->bimodal = div_u64(bimodal, dist);
	si->bimodal = div64_u64(bimodal, dist);
	if (si->dirty_count)
		si->avg_vblocks = div_u64(total_vblocks, ndirty);
	else
@@ -198,9 +198,9 @@ static void update_mem_info(struct f2fs_sb_info *sbi)

	si->page_mem = 0;
	npages = NODE_MAPPING(sbi)->nrpages;
	si->page_mem += npages << PAGE_CACHE_SHIFT;
	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
	npages = META_MAPPING(sbi)->nrpages;
	si->page_mem += npages << PAGE_CACHE_SHIFT;
	si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT;
}

static int stat_show(struct seq_file *s, void *v)
@@ -283,12 +283,12 @@ static int stat_show(struct seq_file *s, void *v)
		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
				si->bg_node_blks);
		seq_puts(s, "\nExtent Cache:\n");
		seq_printf(s, "  - Hit Count: L1-1:%d L1-2:%d L2:%d\n",
		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
				si->hit_largest, si->hit_cached,
				si->hit_rbtree);
		seq_printf(s, "  - Hit Ratio: %d%% (%d / %d)\n",
		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
				!si->total_ext ? 0 :
				(si->hit_total * 100) / si->total_ext,
				div64_u64(si->hit_total * 100, si->total_ext),
				si->hit_total, si->total_ext);
		seq_printf(s, "  - Inner Struct Count: tree: %d, node: %d\n",
				si->ext_tree, si->ext_node);
@@ -333,13 +333,13 @@ static int stat_show(struct seq_file *s, void *v)

		/* memory footprint */
		update_mem_info(si->sbi);
		seq_printf(s, "\nMemory: %u KB\n",
		seq_printf(s, "\nMemory: %llu KB\n",
			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
		seq_printf(s, "  - static: %u KB\n",
		seq_printf(s, "  - static: %llu KB\n",
				si->base_mem >> 10);
		seq_printf(s, "  - cached: %u KB\n",
		seq_printf(s, "  - cached: %llu KB\n",
				si->cache_mem >> 10);
		seq_printf(s, "  - paged : %u KB\n",
		seq_printf(s, "  - paged : %llu KB\n",
				si->page_mem >> 10);
	}
	mutex_unlock(&f2fs_stat_mutex);
@@ -378,10 +378,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
	si->sbi = sbi;
	sbi->stat_info = si;

	atomic_set(&sbi->total_hit_ext, 0);
	atomic_set(&sbi->read_hit_rbtree, 0);
	atomic_set(&sbi->read_hit_largest, 0);
	atomic_set(&sbi->read_hit_cached, 0);
	atomic64_set(&sbi->total_hit_ext, 0);
	atomic64_set(&sbi->read_hit_rbtree, 0);
	atomic64_set(&sbi->read_hit_largest, 0);
	atomic64_set(&sbi->read_hit_cached, 0);

	atomic_set(&sbi->inline_xattr, 0);
	atomic_set(&sbi->inline_inode, 0);
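The atomic_t to atomic64_t conversion here is overflow protection rather than a behavior change: the extent-cache hit counters only ever grow, and a signed 32-bit counter wraps after 2^31 ≈ 2.1 billion events. At, say, 100,000 extent lookups per second, that is roughly six hours of sustained load (2^31 / 10^5 ≈ 21,500 seconds), after which the hit-ratio arithmetic in stat_show() would start printing garbage. The div_u64 → div64_u64 and %d → %llu changes follow from the same widening, since both the divisors and the printed values can now exceed 32 bits.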