Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a176f88e authored by Jaegeuk Kim
Browse files

Merge remote-tracking branch 'aosp/upstream-f2fs-stable-linux-4.19.y' into android-4.19-stable



* aosp/upstream-f2fs-stable-linux-4.19.y:
  f2fs: attach IO flags to the missing cases
  f2fs: add node_io_flag for bio flags likewise data_io_flag
  f2fs: remove unused parameter of f2fs_put_rpages_mapping()
  f2fs: handle readonly filesystem in f2fs_ioc_shutdown()
  f2fs: avoid utf8_strncasecmp() with unstable name
  f2fs: don't return vmalloc() memory from f2fs_kmalloc()
  f2fs: fix retry logic in f2fs_write_cache_pages()
  f2fs: fix wrong discard space
  f2fs: compress: don't compress any datas after cp stop
  f2fs: remove unneeded return value of __insert_discard_tree()
  f2fs: fix wrong value of tracepoint parameter
  f2fs: protect new segment allocation in expand_inode_data
  f2fs: code cleanup by removing ifdef macro surrounding
  writeback: Avoid skipping inode writeback
  f2fs: avoid inifinite loop to wait for flushing node pages at cp_error
  f2fs: compress: fix zstd data corruption
  f2fs: add compressed/gc data read IO stat
  f2fs: fix potential use-after-free issue
  f2fs: compress: don't handle non-compressed data in workqueue
  f2fs: remove redundant assignment to variable err
  f2fs: refactor resize_fs to avoid meta updates in progress
  f2fs: use round_up to enhance calculation
  f2fs: introduce F2FS_IOC_RESERVE_COMPRESS_BLOCKS
  f2fs: Avoid double lock for cp_rwsem during checkpoint
  f2fs: report delalloc reserve as non-free in statfs for project quota
  f2fs: Fix wrong stub helper update_sit_info
  f2fs: compress: let lz4 compressor handle output buffer budget properly
  f2fs: remove blk_plugging in block_operations
  f2fs: introduce F2FS_IOC_RELEASE_COMPRESS_BLOCKS
  f2fs: shrink spinlock coverage
  f2fs: correctly fix the parent inode number during fsync()
  f2fs: introduce mempool for {,de}compress intermediate page allocation
  f2fs: introduce f2fs_bmap_compress()
  f2fs: support fiemap on compressed inode
  f2fs: support partial truncation on compressed inode
  f2fs: remove redundant compress inode check
  f2fs: use strcmp() in parse_options()
  f2fs: Use the correct style for SPDX License Identifier

 Conflicts:
	fs/f2fs/data.c
	fs/f2fs/dir.c

Bug: 154167995
Change-Id: I04ec97a9cafef2d7b8736f36a2a8d244965cae9a
Signed-off-by: Jaegeuk Kim <jaegeuk@google.com>
parents d8a2d7d9 551162c9
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
@@ -333,6 +333,15 @@ Description: Give a way to attach REQ_META|FUA to data writes
		*    5 |    4 |   3 |    2 |    1 |   0 |
		* Cold | Warm | Hot | Cold | Warm | Hot |

What:		/sys/fs/f2fs/<disk>/node_io_flag
Date:		June 2020
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:	Give a way to attach REQ_META|FUA to node writes
		given temperature-based bits. Now the bits indicate:
		*      REQ_META     |      REQ_FUA      |
		*    5 |    4 |   3 |    2 |    1 |   0 |
		* Cold | Warm | Hot | Cold | Warm | Hot |

What:		/sys/fs/f2fs/<disk>/iostat_period_ms
Date:		April 2020
Contact:	"Daeho Jeong" <daehojeong@google.com>
+1 −1
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/acl.h
 *
+13 −11
Original line number Diff line number Diff line
@@ -895,7 +895,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
	int i;
	int err;

	sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
@@ -1166,10 +1166,12 @@ static int block_operations(struct f2fs_sb_info *sbi)
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0, cnt = 0;

	blk_start_plug(&plug);
	/*
	 * Let's flush inline_data in dirty node pages.
	 */
	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
@@ -1198,7 +1200,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}
@@ -1214,7 +1216,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}
@@ -1230,7 +1232,7 @@ static int block_operations(struct f2fs_sb_info *sbi)
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			goto out;
			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
@@ -1242,8 +1244,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
out:
	blk_finish_plug(&plug);
	return err;
}

@@ -1562,6 +1562,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	if (cpc->reason != CP_RESIZE)
		mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
@@ -1631,6 +1632,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	if (cpc->reason != CP_RESIZE)
		mutex_unlock(&sbi->cp_mutex);
	return err;
}
+113 −32
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/moduleparam.h>

#include "f2fs.h"
#include "node.h"
@@ -65,15 +66,6 @@ static void f2fs_set_compressed_page(struct page *page,
	page->mapping = inode->i_mapping;
}

/*
 * Tear down a compressed page: clear its private data, detach it from the
 * address space it was attached to, unlock it, and drop the page reference.
 * NOTE(review): assumes the page is locked and holds a reference taken at
 * allocation time — confirm against f2fs_grab_page()/callers.
 */
static void f2fs_put_compressed_page(struct page *page)
{
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	put_page(page);
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;
@@ -98,8 +90,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
				struct address_space *mapping,
static void f2fs_put_rpages_mapping(struct address_space *mapping,
				pgoff_t start, int len)
{
	int i;
@@ -236,7 +227,12 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
	if (!cc->private)
		return -ENOMEM;

	cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
	/*
	 * we do not change cc->clen to LZ4_compressBound(inputsize) to
	 * adapt worst compress case, because lz4 compressor can handle
	 * output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

@@ -252,11 +248,9 @@ static int lz4_compress_pages(struct compress_ctx *cc)

	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
		return -EIO;
	}
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
@@ -366,6 +360,13 @@ static int zstd_compress_pages(struct compress_ctx *cc)
		return -EIO;
	}

	/*
	 * there is compressed data remained in intermediate buffer due to
	 * no more space in cbuf.cdata
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}
@@ -476,17 +477,47 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

static struct page *f2fs_grab_page(void)
static mempool_t *compress_page_pool = NULL;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

/*
 * Preallocate the global mempool of intermediate pages used by the
 * compress/decompress paths (num_compress_pages entries, order-0 pages).
 * Returns 0 on success or -ENOMEM if the pool cannot be created.
 */
int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

/*
 * Release the global compress page mempool created by
 * f2fs_init_compress_mempool(). mempool_destroy() tolerates NULL, so this
 * is safe even if initialization never ran or failed.
 */
void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = alloc_page(GFP_NOFS);
	if (!page)
		return NULL;
	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

/*
 * Return an intermediate compress page to the mempool.
 * Clears the page's private state and mapping and unlocks it first, so the
 * page re-enters the pool in a pristine state. A NULL page is a no-op
 * (the guard is required: unlock_page(NULL) would oops).
 */
static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
@@ -516,7 +547,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_grab_page();
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
@@ -561,7 +592,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
	vunmap(cc->rbuf);

	for (i = nr_cpages; i < cc->nr_cpages; i++) {
		f2fs_put_compressed_page(cc->cpages[i]);
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

@@ -581,7 +612,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_put_compressed_page(cc->cpages[i]);
			f2fs_compress_free_page(cc->cpages[i]);
	}
	kfree(cc->cpages);
	cc->cpages = NULL;
@@ -788,6 +819,8 @@ static bool cluster_may_compress(struct compress_ctx *cc)
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return __cluster_may_compress(cc);
}

@@ -879,7 +912,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,

		if (!PageUptodate(page)) {
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_put_rpages_mapping(cc, mapping, start_idx,
			f2fs_put_rpages_mapping(mapping, start_idx,
					cc->cluster_size);
			f2fs_destroy_compress_ctx(cc);
			goto retry;
@@ -914,7 +947,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
unlock_pages:
	f2fs_unlock_rpages(cc, i);
release_pages:
	f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
	f2fs_put_rpages_mapping(mapping, start_idx, i);
	f2fs_destroy_compress_ctx(cc);
	return ret;
}
@@ -954,6 +987,55 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
	return first_index;
}

/*
 * Truncate a file that may end inside a compressed cluster.
 *
 * @inode: target inode
 * @from:  new file size in bytes; data at and beyond this offset is dropped
 * @lock:  passed through to f2fs_do_truncate_blocks() for the normal-cluster
 *         path (presumably controls taking the node-change lock — TODO
 *         confirm against that helper)
 *
 * If the cluster containing @from is not compressed, fall back to the
 * regular block-truncation path. For a compressed cluster, the cluster is
 * first brought up to date via the compress-overwrite preparation (which
 * decompresses it into rpages), the tail pages are zeroed from @from
 * onward, and the cluster is written back, which rewrites it without the
 * truncated data.
 *
 * Returns 0 on success or a negative errno.
 */
int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	/* round @from down to the first page index of its cluster */
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		/* fsdata carries the array of decompressed cluster pages */
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		/*
		 * Walk the cluster backwards: pages wholly past @from are
		 * zeroed in full; the page containing @from is zeroed from
		 * the cut point, and everything before it is left intact.
		 */
		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		/* write the zero-tailed cluster back (marks it dirty) */
		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
@@ -1135,7 +1217,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_put_compressed_page(page);
	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

@@ -1296,7 +1378,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_grab_page();
		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

@@ -1316,7 +1398,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
			continue;
		}

		dic->tpages[i] = f2fs_grab_page();
		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i])
			goto out_free;
	}
@@ -1338,8 +1420,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
				continue;
			if (!dic->tpages[i])
				continue;
			unlock_page(dic->tpages[i]);
			put_page(dic->tpages[i]);
			f2fs_compress_free_page(dic->tpages[i]);
		}
		kfree(dic->tpages);
	}
@@ -1348,7 +1429,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_put_compressed_page(dic->cpages[i]);
			f2fs_compress_free_page(dic->cpages[i]);
		}
		kfree(dic->cpages);
	}
+122 −26
Original line number Diff line number Diff line
@@ -115,7 +115,8 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_DECRYPT,
	STEP_DECOMPRESS,
	STEP_DECOMPRESS_NOWQ,		/* handle normal cluster data inplace */
	STEP_DECOMPRESS,		/* handle compressed cluster data in workqueue */
	STEP_VERITY,
};

@@ -579,22 +580,28 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi,
	__submit_bio(sbi, bio, type);
}

static void __attach_data_io_flag(struct f2fs_io_info *fio)
static void __attach_io_flag(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
	unsigned int fua_flag = sbi->data_io_flag & temp_mask;
	unsigned int meta_flag = (sbi->data_io_flag >> NR_TEMP_TYPE) &
								temp_mask;
	unsigned int io_flag, fua_flag, meta_flag;

	if (fio->type == DATA)
		io_flag = sbi->data_io_flag;
	else if (fio->type == NODE)
		io_flag = sbi->node_io_flag;
	else
		return;

	fua_flag = io_flag & temp_mask;
	meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;

	/*
	 * data io flag bits per temp:
	 * data/node io flag bits per temp:
	 *      REQ_META     |      REQ_FUA      |
	 *    5 |    4 |   3 |    2 |    1 |   0 |
	 * Cold | Warm | Hot | Cold | Warm | Hot |
	 */
	if (fio->type != DATA)
		return;

	if ((1 << fio->temp) & meta_flag)
		fio->op_flags |= REQ_META;
	if ((1 << fio->temp) & fua_flag)
@@ -608,7 +615,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
	if (!io->bio)
		return;

	__attach_data_io_flag(fio);
	__attach_io_flag(fio);
	bio_set_op_attrs(io->bio, fio->op, fio->op_flags);

	if (is_read_io(fio->op))
@@ -753,6 +760,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
	if (fio->io_wbc && !is_read_io(fio->op))
		wbc_account_io(fio->io_wbc, page, PAGE_SIZE);

	__attach_io_flag(fio);
	bio_set_op_attrs(bio, fio->op, fio->op_flags);

	inc_page_count(fio->sbi, is_read_io(fio->op) ?
@@ -947,6 +955,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
		f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
				       fio->page->index, fio,
				       GFP_NOIO);
		__attach_io_flag(fio);
		bio_set_op_attrs(bio, fio->op, fio->op_flags);

		add_bio_entry(fio->sbi, bio, page, fio->temp);
@@ -1076,7 +1085,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;
	if (f2fs_compressed_file(inode))
		post_read_steps |= 1 << STEP_DECOMPRESS;
		post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
	if (f2fs_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

@@ -1918,6 +1927,25 @@ static int f2fs_xattr_fiemap(struct inode *inode,
	return (err < 0 ? err : 0);
}

/*
 * Compute the maximum number of data blocks a single inode can address,
 * per the f2fs node layout: direct pointers in the inode itself, two
 * direct node blocks, two indirect node blocks, and one double-indirect
 * node block. Used by fiemap to bound the per-inode logical block range.
 */
static loff_t max_inode_blocks(struct inode *inode)
{
	loff_t result = ADDRS_PER_INODE(inode);
	loff_t leaf_count = ADDRS_PER_BLOCK(inode);

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
@@ -1927,6 +1955,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;
	bool compr_cluster = false;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;

	if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
		ret = f2fs_precache_extents(inode);
@@ -1961,6 +1991,9 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	if (compr_cluster)
		map_bh.b_size = blk_to_logical(inode, cluster_size - 1);

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
@@ -1971,7 +2004,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		start_blk = next_pgofs;

		if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
					F2FS_I_SB(inode)->max_file_blocks))
						max_inode_blocks(inode)))
			goto prep_next;

		flags |= FIEMAP_EXTENT_LAST;
@@ -1983,11 +2016,38 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
		if (ret)
			goto out;
		size = 0;
	}

	if (start_blk > last_blk || ret)
	if (start_blk > last_blk)
		goto out;

	if (compr_cluster) {
		compr_cluster = false;


		logical = blk_to_logical(inode, start_blk - 1);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = blk_to_logical(inode, cluster_size);

		flags |= FIEMAP_EXTENT_ENCODED;

		start_blk += cluster_size - 1;

		if (start_blk > last_blk)
			goto out;

		goto prep_next;
	}

	if (map_bh.b_blocknr == COMPRESS_ADDR) {
		compr_cluster = true;
		start_blk++;
		goto prep_next;
	}

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
@@ -2225,6 +2285,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page = dic->cpages[i];
		block_t blkaddr;
		struct bio_post_read_ctx *ctx;

		blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);
@@ -2243,16 +2304,16 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
					page->index, for_write);
			if (IS_ERR(bio)) {
				ret = PTR_ERR(bio);
				bio = NULL;
				dic->failed = true;
				if (refcount_sub_and_test(dic->nr_cpages - i,
							&dic->ref))
							&dic->ref)) {
					f2fs_decompress_end_io(dic->rpages,
							cc->cluster_size, true,
							false);
					f2fs_free_dic(dic);
				}
				f2fs_put_dnode(&dn);
				*bio_ret = bio;
				*bio_ret = NULL;
				return ret;
			}
		}
@@ -2262,8 +2323,14 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		/* tag STEP_DECOMPRESS to handle IO in wq */
		ctx = bio->bi_private;
		if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
			ctx->enabled_steps |= 1 << STEP_DECOMPRESS;

		inc_page_count(sbi, F2FS_RD_DATA);
		f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
		ClearPageError(page);
		*last_block_in_bio = blkaddr;
	}
@@ -2893,7 +2960,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int nwritten = 0;
@@ -2911,17 +2977,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
@@ -3086,12 +3147,13 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
		}
	}
#endif
	if ((!cycled && !done) || retry) {
		cycled = 1;
	if (retry) {
		index = 0;
		end = writeback_index - 1;
		end = -1;
		goto retry;
	}
	if (wbc->range_cyclic && !done)
		done_index = 0;
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

@@ -3747,6 +3809,37 @@ static int f2fs_set_data_page_dirty(struct page *page)
	return 0;
}


/*
 * bmap() helper for compressed files: map logical block @block to its
 * on-disk block number.
 *
 * Looks up the dnode covering the cluster that contains @block. If the
 * cluster is compressed (its first address is COMPRESS_ADDR) there is no
 * meaningful 1:1 physical block, so 0 is returned; likewise 0 is returned
 * for holes/invalid addresses or any lookup failure. Without
 * CONFIG_F2FS_FS_COMPRESSION this path is unreachable and reports
 * -EOPNOTSUPP.
 */
static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
	struct dnode_of_data dn;
	sector_t start_idx, blknr = 0;
	int ret;

	/* first block index of the cluster containing @block */
	start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret)
		return 0;

	if (dn.data_blkaddr != COMPRESS_ADDR) {
		/* normal cluster: step to the requested block's slot */
		dn.ofs_in_node += block - start_idx;
		blknr = f2fs_data_blkaddr(&dn);
		if (!__is_valid_data_blkaddr(blknr))
			blknr = 0;
	}

	f2fs_put_dnode(&dn);

	return blknr;
#else
	return -EOPNOTSUPP;
#endif
}


static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
@@ -3758,6 +3851,9 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	if (f2fs_compressed_file(inode))
		return f2fs_bmap_compress(inode, block);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}

Loading