Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e6c14f4 authored by Jaegeuk Kim, committed by Alistair Strachan
Browse files

Merge upstream-f2fs-stable-linux-4.9.y into android-4.9



Additional urgent fixes on top of 5.0-rc1-4.9:
  f2fs: don't access node/meta inode mapping after iput
  f2fs: wait on atomic writes to count F2FS_CP_WB_DATA

* origin/upstream-f2fs-stable-linux-4.9.y:
  f2fs: don't access node/meta inode mapping after iput
  f2fs: wait on atomic writes to count F2FS_CP_WB_DATA
  f2fs: sanity check of xattr entry size
  f2fs: fix use-after-free issue when accessing sbi->stat_info
  f2fs: check PageWriteback flag for ordered case
  f2fs: fix validation of the block count in sanity_check_raw_super
  f2fs: fix missing unlock(sbi->gc_mutex)
  f2fs: clean up structure extent_node
  f2fs: fix block address for __check_sit_bitmap
  f2fs: fix sbi->extent_list corruption issue
  f2fs: clean up checkpoint flow
  f2fs: flush stale issued discard candidates
  f2fs: correct wrong spelling, issing_*
  f2fs: use kvmalloc, if kmalloc is failed
  f2fs: remove redundant comment of unused wio_mutex
  f2fs: fix to reorder set_page_dirty and wait_on_page_writeback
  f2fs: clear PG_writeback if IPU failed
  f2fs: add an ioctl() to explicitly trigger fsck later
  f2fs: avoid frequent costly fsck triggers
  f2fs: fix m_may_create to make OPU DIO write correctly
  f2fs: fix to update new block address correctly for OPU
  f2fs: adjust trace print in f2fs_get_victim() to cover all paths
  f2fs: fix to allow node segment for GC by ioctl path
  f2fs: make "f2fs_fault_name[]" const char *
  f2fs: read page index before freeing
  f2fs: fix wrong return value of f2fs_acl_create
  f2fs: avoid build warn of fall_through
  f2fs: fix race between write_checkpoint and write_begin
  f2fs: check memory boundary by insane namelen
  f2fs: only flush the single temp bio cache which owns the target page
  f2fs: fix out-place-update DIO write
  f2fs: fix to be aware discard/preflush/dio command in is_idle()
  f2fs: add to account direct IO
  f2fs: move dir data flush to write checkpoint process
  f2fs: change segment to section in f2fs_ioc_gc_range
  f2fs: export migration_granularity sysfs entry
  f2fs: support subsectional garbage collection
  f2fs: introduce __is_large_section() for cleanup
  f2fs: clean up f2fs_sb_has_##feature_name
  f2fs: remove codes of unused wio_mutex
  f2fs: fix count of seg_freed to make sec_freed correct
  f2fs: fix to account preflush command for noflush_merge mode
  f2fs: avoid GC causing encrypted file corrupted

Change-Id: I7bfeb214db53112b8a0b24a52c0cde81c315f51a
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
parents ed0b11d2 ec19f649
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -92,6 +92,15 @@ Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
		 Controls the number of trials to find a victim segment.

What:		/sys/fs/f2fs/<disk>/migration_granularity
Date:		October 2018
Contact:	"Chao Yu" <yuchao0@huawei.com>
Description:
		 Controls the migration granularity of garbage collection on a
		 large section. It lets GC move partial segment(s) of one
		 section in one GC cycle, so that the heavy overhead of GC is
		 dispersed across multiple lightweight cycles.

What:		/sys/fs/f2fs/<disk>/dir_level
Date:		March 2014
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
+11 −9
Original line number Diff line number Diff line
@@ -160,7 +160,7 @@ static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
	return (void *)f2fs_acl;

fail:
	kfree(f2fs_acl);
	kvfree(f2fs_acl);
	return ERR_PTR(-EINVAL);
}

@@ -190,7 +190,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type,
		acl = NULL;
	else
		acl = ERR_PTR(retval);
	kfree(value);
	kvfree(value);

	return acl;
}
@@ -240,7 +240,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,

	error = f2fs_setxattr(inode, name_index, "", value, size, ipage, 0);

	kfree(value);
	kvfree(value);
	if (!error)
		set_cached_acl(inode, type, acl);

@@ -352,12 +352,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
		return PTR_ERR(p);

	clone = f2fs_acl_clone(p, GFP_NOFS);
	if (!clone)
		goto no_mem;
	if (!clone) {
		ret = -ENOMEM;
		goto release_acl;
	}

	ret = f2fs_acl_create_masq(clone, mode);
	if (ret < 0)
		goto no_mem_clone;
		goto release_clone;

	if (ret == 0)
		posix_acl_release(clone);
@@ -371,11 +373,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,

	return 0;

no_mem_clone:
release_clone:
	posix_acl_release(clone);
no_mem:
release_acl:
	posix_acl_release(p);
	return -ENOMEM;
	return ret;
}

int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
+12 −21
Original line number Diff line number Diff line
@@ -44,7 +44,7 @@ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true);
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
@@ -370,9 +370,8 @@ long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);
			f2fs_wait_on_page_writeback(page, META, true, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

@@ -911,7 +910,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kfree(sbi->ckpt);
	kvfree(sbi->ckpt);
	return -EINVAL;
}

@@ -1290,11 +1289,11 @@ static void commit_checkpoint(struct f2fs_sb_info *sbi,
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	int err;

	f2fs_wait_on_page_writeback(page, META, true, true);

	memcpy(page_address(page), src, PAGE_SIZE);
	set_page_dirty(page);

	f2fs_wait_on_page_writeback(page, META, true);
	f2fs_bug_on(sbi, PageWriteback(page));
	set_page_dirty(page);
	if (unlikely(!clear_page_dirty_for_io(page)))
		f2fs_bug_on(sbi, 1);

@@ -1328,11 +1327,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	int err;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
		if (unlikely(f2fs_cp_error(sbi)))
			break;
	}
	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
					!f2fs_cp_error(sbi));

	/*
	 * modify checkpoint
@@ -1405,14 +1402,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					(i << F2FS_BLKSIZE_BITS), blk + i);

		/* Flush all the NAT BITS pages */
		while (get_pages(sbi, F2FS_DIRTY_META)) {
			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
							FS_CP_META_IO);
			if (unlikely(f2fs_cp_error(sbi)))
				break;
		}
	}

	/* write out checkpoint buffer at block 0 */
@@ -1448,6 +1437,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)

	/* Here, we have one bio having CP pack except cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
					!f2fs_cp_error(sbi));

	/* wait for previous submitted meta pages writeback */
	f2fs_wait_on_all_pages_writeback(sbi);
@@ -1465,7 +1456,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	 * invalidate intermediate page cache borrowed from meta inode
	 * which are used for migration of encrypted inode's blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi->sb))
	if (f2fs_sb_has_encrypt(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

+112 −48
Original line number Diff line number Diff line
@@ -370,29 +370,6 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
	return false;
}

static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum temp_type temp;
	struct f2fs_bio_info *io;
	bool ret = false;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		io = sbi->write_io[btype] + temp;

		down_read(&io->io_rwsem);
		ret = __has_merged_page(io, inode, page, ino);
		up_read(&io->io_rwsem);

		/* TODO: use HOT temp only for meta pages now. */
		if (ret || btype == META)
			break;
	}
	return ret;
}

static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
@@ -418,12 +395,18 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
				nid_t ino, enum page_type type, bool force)
{
	enum temp_type temp;

	if (!force && !has_merged_page(sbi, inode, page, ino, type))
		return;
	bool ret = true;

	for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
		if (!force)	{
			enum page_type btype = PAGE_TYPE_OF_BIO(type);
			struct f2fs_bio_info *io = sbi->write_io[btype] + temp;

			down_read(&io->io_rwsem);
			ret = __has_merged_page(io, inode, page, ino);
			up_read(&io->io_rwsem);
		}
		if (ret)
			__f2fs_submit_merged_write(sbi, type, temp);

		/* TODO: use HOT temp only for meta pages now. */
@@ -641,7 +624,7 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
 */
void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
{
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
	__set_data_blkaddr(dn);
	if (set_page_dirty(dn->node_page))
		dn->node_changed = true;
@@ -671,7 +654,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
	trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
						dn->ofs_in_node, count);

	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr = datablock_addr(dn->inode,
@@ -955,6 +938,9 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
			return err;
	}

	if (direct_io && allow_outplace_dio(inode, iocb, from))
		return 0;

	if (is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

@@ -968,6 +954,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = true;

	if (direct_io) {
		map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
@@ -1026,7 +1013,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE;
	int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
	pgoff_t pgofs, end_offset, end;
	int err = 0, ofs = 1;
	unsigned int ofs_in_node, last_ofs_in_node;
@@ -1046,6 +1033,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	end = pgofs + maxblocks;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create)
			goto next_dnode;

		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
@@ -1060,7 +1051,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	}

next_dnode:
	if (create)
	if (map->m_may_create)
		__do_map_lock(sbi, flag, true);

	/* When reading holes, we need its node page */
@@ -1097,12 +1088,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,

	if (is_valid_data_blkaddr(sbi, blkaddr)) {
		/* use out-place-update for direct IO under LFS mode */
		if (test_opt(sbi, LFS) && create &&
				flag == F2FS_GET_BLOCK_DIO) {
		if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO &&
							map->m_may_create) {
			err = __allocate_data_block(&dn, map->m_seg_type);
			if (!err)
			if (!err) {
				blkaddr = dn.data_blkaddr;
				set_inode_flag(inode, FI_APPEND_WRITE);
			}
		}
	} else {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
@@ -1207,7 +1200,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,

	f2fs_put_dnode(&dn);

	if (create) {
	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
@@ -1233,7 +1226,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	}
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
	if (map->m_may_create) {
		__do_map_lock(sbi, flag, false);
		f2fs_balance_fs(sbi, dn.node_changed);
	}
@@ -1255,6 +1248,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;
	last_lblk = F2FS_BLK_ALIGN(pos + len);

	while (map.m_lblk < last_lblk) {
@@ -1269,7 +1263,7 @@ bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs, int seg_type)
			pgoff_t *next_pgofs, int seg_type, bool may_write)
{
	struct f2fs_map_blocks map;
	int err;
@@ -1279,6 +1273,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
	map.m_next_pgofs = next_pgofs;
	map.m_next_extent = NULL;
	map.m_seg_type = seg_type;
	map.m_may_create = may_write;

	err = f2fs_map_blocks(inode, &map, create, flag);
	if (!err) {
@@ -1295,7 +1290,16 @@ static int get_data_block(struct inode *inode, sector_t iblock,
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs,
							NO_CHECK_TYPE);
							NO_CHECK_TYPE, create);
}

static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				true);
}

static int get_data_block_dio(struct inode *inode, sector_t iblock,
@@ -1303,8 +1307,8 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
{
	return __get_data_block(inode, iblock, bh_result, create,
				F2FS_GET_BLOCK_DIO, NULL,
						f2fs_rw_hint_to_seg_type(
							inode->i_write_hint));
				f2fs_rw_hint_to_seg_type(inode->i_write_hint),
				false);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1316,7 +1320,7 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL,
						NO_CHECK_TYPE);
						NO_CHECK_TYPE, create);
}

static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -1523,6 +1527,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
	map.m_next_pgofs = NULL;
	map.m_next_extent = NULL;
	map.m_seg_type = NO_CHECK_TYPE;
	map.m_may_create = false;

	for (; nr_pages; nr_pages--) {
		if (pages) {
@@ -1853,6 +1858,8 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
		if (fio->need_lock == LOCK_REQ)
			f2fs_unlock_op(fio->sbi);
		err = f2fs_inplace_write_data(fio);
		if (err && PageWriteback(page))
			end_page_writeback(page);
		trace_f2fs_do_write_data_page(fio->page, IPU);
		set_inode_flag(inode, FI_UPDATE_WRITE);
		return err;
@@ -2140,12 +2147,11 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
							DATA, true, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

@@ -2322,6 +2328,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
	bool locked = false;
	struct extent_info ei = {0,0,0};
	int err = 0;
	int flag;

	/*
	 * we already allocated all the blocks, so we don't need to get
@@ -2331,9 +2338,15 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
			!is_inode_flag_set(inode, FI_NO_PREALLOC))
		return 0;

	/* f2fs_lock_op avoids race between write CP and convert_inline_page */
	if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
		flag = F2FS_GET_BLOCK_DEFAULT;
	else
		flag = F2FS_GET_BLOCK_PRE_AIO;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_MASK) >= i_size_read(inode)) {
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		__do_map_lock(sbi, flag, true);
		locked = true;
	}
restart:
@@ -2371,6 +2384,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
				f2fs_put_dnode(&dn);
				__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
								true);
				WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
				locked = true;
				goto restart;
			}
@@ -2384,7 +2398,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		__do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
		__do_map_lock(sbi, flag, false);
	return err;
}

@@ -2465,7 +2479,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);
	f2fs_wait_on_page_writeback(page, DATA, false, true);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;
@@ -2557,6 +2571,53 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
	return 0;
}

static void f2fs_dio_end_io(struct bio *bio)
{
	struct f2fs_private_dio *dio = bio->bi_private;

	dec_page_count(F2FS_I_SB(dio->inode),
			dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	bio->bi_private = dio->orig_private;
	bio->bi_end_io = dio->orig_end_io;

	kvfree(dio);

	bio_endio(bio);
}

static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
							loff_t file_offset)
{
	struct f2fs_private_dio *dio;
	bool write = (bio_op(bio) == REQ_OP_WRITE);
	int err;

	dio = f2fs_kzalloc(F2FS_I_SB(inode),
			sizeof(struct f2fs_private_dio), GFP_NOFS);
	if (!dio) {
		err = -ENOMEM;
		goto out;
	}

	dio->inode = inode;
	dio->orig_end_io = bio->bi_end_io;
	dio->orig_private = bio->bi_private;
	dio->write = write;

	bio->bi_end_io = f2fs_dio_end_io;
	bio->bi_private = dio;

	inc_page_count(F2FS_I_SB(inode),
			write ? F2FS_DIO_WRITE : F2FS_DIO_READ);

	submit_bio(bio);
	return;
out:
	bio->bi_error = -EIO;
	bio_endio(bio);
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
@@ -2625,7 +2686,10 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
			down_read(&fi->i_gc_rwsem[READ]);
	}

	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
	err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
			iter, rw == WRITE ? get_data_block_dio_write :
			get_data_block_dio, NULL, f2fs_dio_submit_bio,
			DIO_LOCKING | DIO_SKIP_HOLES);

	if (do_opu)
		up_read(&fi->i_gc_rwsem[READ]);
+20 −11
Original line number Diff line number Diff line
@@ -53,6 +53,8 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	si->vw_cnt = atomic_read(&sbi->vw_cnt);
	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
	si->max_vw_cnt = atomic_read(&sbi->max_vw_cnt);
	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
@@ -62,7 +64,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
		si->nr_flushed =
			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
		si->nr_flushing =
			atomic_read(&SM_I(sbi)->fcc_info->issing_flush);
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
		si->flush_list_empty =
			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
	}
@@ -70,7 +72,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
		si->nr_discarded =
			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
		si->nr_discarding =
			atomic_read(&SM_I(sbi)->dcc_info->issing_discard);
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
		si->nr_discard_cmd =
			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
@@ -94,7 +96,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
	si->free_secs = free_sections(sbi);
	si->prefree_count = prefree_segments(sbi);
	si->dirty_count = dirty_segments(sbi);
	if (sbi->node_inode)
		si->node_pages = NODE_MAPPING(sbi)->nrpages;
	if (sbi->meta_inode)
		si->meta_pages = META_MAPPING(sbi)->nrpages;
	si->nats = NM_I(sbi)->nat_cnt;
	si->dirty_nats = NM_I(sbi)->dirty_nat_cnt;
@@ -173,7 +177,6 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
static void update_mem_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	unsigned npages;
	int i;

	if (si->base_mem)
@@ -197,7 +200,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	si->base_mem += SIT_VBLOCK_MAP_SIZE;
	if (sbi->segs_per_sec > 1)
	if (__is_large_section(sbi))
		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);

@@ -256,11 +259,15 @@ static void update_mem_info(struct f2fs_sb_info *sbi)
						sizeof(struct extent_node);

	si->page_mem = 0;
	npages = NODE_MAPPING(sbi)->nrpages;
	if (sbi->node_inode) {
		unsigned npages = NODE_MAPPING(sbi)->nrpages;
		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	npages = META_MAPPING(sbi)->nrpages;
	}
	if (sbi->meta_inode) {
		unsigned npages = META_MAPPING(sbi)->nrpages;
		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	}
}

static int stat_show(struct seq_file *s, void *v)
{
@@ -374,6 +381,8 @@ static int stat_show(struct seq_file *s, void *v)
		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
				si->ext_tree, si->zombie_tree, si->ext_node);
		seq_puts(s, "\nBalancing F2FS Async:\n");
		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
			   si->nr_dio_read, si->nr_dio_write);
		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), "
@@ -510,7 +519,7 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
	list_del(&si->stat_list);
	mutex_unlock(&f2fs_stat_mutex);

	kfree(si);
	kvfree(si);
}

int __init f2fs_create_root_stats(void)
Loading