Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08cf7cf4 authored by Chao Yu's avatar Chao Yu Committed by Jaegeuk Kim
Browse files

f2fs: let sync node IO interrupt async one



Although mixed sync/async IOs can have continuous LBA, as they have
different IO priority, block IO scheduler will add them into different
queues and commit them separately, resulting in split IOs which cause
worse performance.

This patch gives high priority to synchronous IO of nodes, meaning that
once a synchronous flow starts, it can interrupt the asynchronous writeback
flow of the system flusher, so more big IOs can be expected.

Signed-off-by: default avatarChao Yu <yuchao0@huawei.com>
Signed-off-by: default avatarJaegeuk Kim <jaegeuk@kernel.org>
parent 5dc93797
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1085,7 +1085,9 @@ retry_flush_nodes:

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
+5 −4
Original line number Diff line number Diff line
@@ -1914,6 +1914,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
@@ -1968,7 +1969,7 @@ retry:
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&F2FS_M_SB(mapping)->wb_sync_req) &&
			if (atomic_read(&sbi->wb_sync_req[DATA]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
@@ -2088,8 +2089,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,

	/* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req);
	else if (atomic_read(&sbi->wb_sync_req))
		atomic_inc(&sbi->wb_sync_req[DATA]);
	else if (atomic_read(&sbi->wb_sync_req[DATA]))
		goto skip_write;

	blk_start_plug(&plug);
@@ -2097,7 +2098,7 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req);
		atomic_dec(&sbi->wb_sync_req[DATA]);
	/*
	 * if some pages were truncated, we cannot guarantee its mapping->host
	 * to detect pending bios.
+1 −1
Original line number Diff line number Diff line
@@ -1299,7 +1299,7 @@ struct f2fs_sb_info {
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req;			/* count # of WB_SYNC threads */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;
+2 −0
Original line number Diff line number Diff line
@@ -278,7 +278,9 @@ go_write:
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

+7 −0
Original line number Diff line number Diff line
@@ -473,12 +473,16 @@ static void gc_node_segment(struct f2fs_sb_info *sbi,
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
@@ -525,6 +529,9 @@ next_step:

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}

/*
Loading