Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 458e6197 authored by Jaegeuk Kim
Browse files

f2fs: refactor bio->rw handling



This patch introduces f2fs_io_info to mitigate the complex parameter list.

struct f2fs_io_info {
	enum page_type type;		/* contains DATA/NODE/META/META_FLUSH */
	int rw;				/* contains R/RS/W/WS */
	int rw_flag;			/* contains REQ_META/REQ_PRIO */
};

1. f2fs_write_data_pages
 - DATA
 - WRITE_SYNC is set when wbc->sync_mode == WB_SYNC_ALL.

2. sync_node_pages
 - NODE
 - WRITE_SYNC all the time

3. sync_meta_pages
 - META
 - WRITE_SYNC all the time
 - REQ_META | REQ_PRIO all the time

 ** f2fs_submit_merged_bio() handles META_FLUSH.

4. ra_nat_pages, ra_sit_pages, ra_sum_pages
 - META
 - READ_SYNC

Cc: Fan Li <fanofcode.li@samsung.com>
Cc: Changman Lee <cm224.lee@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
parent 63a0b7cb
Loading
Loading
Loading
Loading
+5 −6
Original line number Diff line number Diff line
@@ -164,8 +164,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
	}

	if (nwritten)
		f2fs_submit_merged_bio(sbi, type, nr_to_write == LONG_MAX,
								WRITE);
		f2fs_submit_merged_bio(sbi, type, WRITE);

	return nwritten;
}
@@ -598,7 +597,7 @@ retry:
		 * We should submit bio, since it exists several
		 * wribacking dentry pages in the freeing inode.
		 */
		f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
	goto retry;
}
@@ -804,9 +803,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)

	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops");

	f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, true, WRITE);
	f2fs_submit_merged_bio(sbi, META, true, WRITE);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);

	/*
	 * update checkpoint pack index
+43 −42
Original line number Diff line number Diff line
@@ -93,37 +93,28 @@ static void f2fs_write_end_io(struct bio *bio, int err)
	bio_put(bio);
}

static void __submit_merged_bio(struct f2fs_sb_info *sbi,
				struct f2fs_bio_info *io,
				enum page_type type, bool sync, int rw)
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	if (btype == META)
		rw |= REQ_META;
	rw = fio->rw | fio->rw_flag;

	if (is_read_io(rw)) {
		if (sync)
			rw |= READ_SYNC;
		submit_bio(rw, io->bio);
		trace_f2fs_submit_read_bio(sbi->sb, rw, type, io->bio);
		trace_f2fs_submit_read_bio(io->sbi->sb, rw, fio->type, io->bio);
		io->bio = NULL;
		return;
	}

	if (sync)
		rw |= WRITE_SYNC;
	if (type >= META_FLUSH)
		rw |= WRITE_FLUSH_FUA;

	/*
	 * META_FLUSH is only from the checkpoint procedure, and we should wait
	 * this metadata bio for FS consistency.
	 */
	if (type == META_FLUSH) {
	if (fio->type == META_FLUSH) {
		DECLARE_COMPLETION_ONSTACK(wait);
		io->bio->bi_private = &wait;
		submit_bio(rw, io->bio);
@@ -131,12 +122,12 @@ static void __submit_merged_bio(struct f2fs_sb_info *sbi,
	} else {
		submit_bio(rw, io->bio);
	}
	trace_f2fs_submit_write_bio(sbi->sb, rw, btype, io->bio);
	trace_f2fs_submit_write_bio(io->sbi->sb, rw, fio->type, io->bio);
	io->bio = NULL;
}

void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, bool sync, int rw)
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;
@@ -144,7 +135,13 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);
	__submit_merged_bio(sbi, io, type, sync, rw);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		io->fio.rw = WRITE_FLUSH_FUA;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
}

@@ -178,33 +175,33 @@ int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
}

void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, enum page_type type, int rw)
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct block_device *bdev = sbi->sb->s_bdev;
	struct f2fs_bio_info *io;
	int bio_blocks;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];
	io = is_read_io(fio->rw) ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);

	if (!is_read_io(rw))
	if (!is_read_io(fio->rw))
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->rw_flag != rw))
		__submit_merged_bio(sbi, io, type, false, io->rw_flag);
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
		io->bio = __bio_alloc(bdev, bio_blocks);
		io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
		io->bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io :
		io->bio->bi_end_io = is_read_io(fio->rw) ? f2fs_read_end_io :
							f2fs_write_end_io;
		io->rw_flag = rw;
		io->fio = *fio;
		/*
		 * The end_io will be assigned at the sumbission phase.
		 * Until then, let bio_add_page() merge consecutive IOs as much
@@ -214,14 +211,14 @@ alloc_new:

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(sbi, io, type, false, rw);
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	trace_f2fs_submit_page_mbio(page, rw, type, blk_addr);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
@@ -643,10 +640,10 @@ static int f2fs_read_data_pages(struct file *file,
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

int do_write_data_page(struct page *page, struct writeback_control *wbc)
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

@@ -655,10 +652,10 @@ int do_write_data_page(struct page *page, struct writeback_control *wbc)
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;
	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);
@@ -667,15 +664,13 @@ int do_write_data_page(struct page *page, struct writeback_control *wbc)
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr, wbc);
		rewrite_data_page(page, old_blkaddr, fio);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr, wbc);
		update_extent_cache(new_blk_addr, &dn);
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
@@ -693,6 +688,11 @@ static int f2fs_write_data_page(struct page *page,
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC: WRITE,
		.rw_flag = 0,
	};

	if (page->index < end_index)
		goto write;
@@ -721,10 +721,10 @@ write:
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, wbc);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page, wbc);
		err = do_write_data_page(page, &fio);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
@@ -734,7 +734,7 @@ write:
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	clear_cold_data(page);
out:
@@ -786,7 +786,8 @@ static int f2fs_write_data_pages(struct address_space *mapping,
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_merged_bio(sbi, DATA, wbc->sync_mode == WB_SYNC_ALL, WRITE);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

+14 −8
Original line number Diff line number Diff line
@@ -364,11 +364,18 @@ enum page_type {
	META_FLUSH,
};

struct f2fs_io_info {
	enum page_type type;		/* contains DATA/NODE/META/META_FLUSH */
	int rw;				/* contains R/RS/W/WS */
	int rw_flag;			/* contains REQ_META/REQ_PRIO */
};

#define is_read_io(rw)	(((rw) & 1) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	int rw_flag;				/* rw flag for all pages */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct mutex io_mutex;		/* mutex for bio */
};

@@ -1098,10 +1105,9 @@ struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data*,
			block_t, block_t *, struct writeback_control *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t,
				struct writeback_control *);
void write_data_page(struct page *, struct dnode_of_data *, block_t *,
					struct f2fs_io_info *);
void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
@@ -1142,17 +1148,17 @@ void destroy_checkpoint_caches(void);
/*
 * data.c
 */
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, bool, int);
void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
int f2fs_submit_page_bio(struct f2fs_sb_info *, struct page *, block_t, int);
void f2fs_submit_page_mbio(struct f2fs_sb_info *, struct page *, block_t,
							enum page_type, int);
						struct f2fs_io_info *);
int reserve_new_block(struct dnode_of_data *);
int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct page *, struct writeback_control *);
int do_write_data_page(struct page *, struct f2fs_io_info *);

/*
 * gc.c
+6 −4
Original line number Diff line number Diff line
@@ -520,8 +520,10 @@ static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,

static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	struct writeback_control wbc = {
		.sync_mode = 1,
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC,
		.rw_flag = 0,
	};

	if (gc_type == BG_GC) {
@@ -540,7 +542,7 @@ static void move_data_page(struct inode *inode, struct page *page, int gc_type)
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page, &wbc);
		do_write_data_page(page, &fio);
		clear_cold_data(page);
	}
out:
@@ -634,7 +636,7 @@ next_iput:
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_merged_bio(sbi, DATA, true, WRITE);
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
+16 −6
Original line number Diff line number Diff line
@@ -92,6 +92,12 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
	struct page *page;
	pgoff_t index;
	int i;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC,
		.rw_flag = REQ_META | REQ_PRIO
	};


	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (unlikely(nid >= nm_i->max_nid))
@@ -106,11 +112,11 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_submit_page_mbio(sbi, page, index, META, READ);
		f2fs_submit_page_mbio(sbi, page, index, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_merged_bio(sbi, META, true, READ);
	f2fs_submit_merged_bio(sbi, META, READ);
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
@@ -1136,8 +1142,7 @@ continue_unlock:
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL,
									WRITE);
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}

@@ -1574,6 +1579,11 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
{
	struct page *page;
	int page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC,
		.rw_flag = REQ_META | REQ_PRIO
	};

	for (; page_idx < start + nrpages; page_idx++) {
		/* alloc temporal page for read node summary info*/
@@ -1594,9 +1604,9 @@ static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
	}

	list_for_each_entry(page, pages, lru)
		f2fs_submit_page_mbio(sbi, page, page->index, META, READ);
		f2fs_submit_page_mbio(sbi, page, page->index, &fio);

	f2fs_submit_merged_bio(sbi, META, true, READ);
	f2fs_submit_merged_bio(sbi, META, READ);
	return 0;
}

Loading