
Commit 3fb75cb8 authored by Linus Torvalds
Pull misc filesystem and quota fixes from Jan Kara:
 "Some smaller udf, ext2, quota & reiserfs fixes"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  ext2: Unmap metadata when zeroing blocks
  udf: don't bother with full-page write optimisations in adinicb case
  reiserfs: Unlock superblock before calling reiserfs_quota_on_mount()
  udf: Remove useless check in udf_adinicb_write_begin()
  quota: fill in Q_XGETQSTAT inode information for inactive quotas
  ext2: Check return value from ext2_get_group_desc()
parents 687ee0ad 225c5161
fs/ext2/ialloc.c +5 −0
@@ -465,6 +465,11 @@ struct inode *ext2_new_inode(struct inode *dir, umode_t mode,
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
 		gdp = ext2_get_group_desc(sb, group, &bh2);
+		if (!gdp) {
+			if (++group == sbi->s_groups_count)
+				group = 0;
+			continue;
+		}
 		brelse(bitmap_bh);
 		bitmap_bh = read_inode_bitmap(sb, group);
 		if (!bitmap_bh) {
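The hunk above makes ext2_new_inode() tolerate a corrupted or unreadable group descriptor instead of dereferencing NULL. As a rough illustration only, here is a minimal userspace sketch of that scan shape; find_usable_group(), struct group_desc and GROUP_COUNT are hypothetical stand-ins, not kernel code:

#include <stdio.h>

#define GROUP_COUNT 4

struct group_desc { int free_inodes; };

/* Scan every group at most once, starting at 'group': a NULL descriptor
 * (unreadable/corrupted) is skipped, the index wraps past the last group,
 * and the bounded 'i' counter guarantees termination even when every
 * descriptor is bad. */
static int find_usable_group(struct group_desc **descs, int group)
{
	int i;

	for (i = 0; i < GROUP_COUNT; i++) {
		struct group_desc *gdp = descs[group];

		if (!gdp) {
			if (++group == GROUP_COUNT)
				group = 0;
			continue;
		}
		if (gdp->free_inodes > 0)
			return group;
		if (++group == GROUP_COUNT)
			group = 0;
	}
	return -1;
}

int main(void)
{
	struct group_desc g2 = { .free_inodes = 7 };
	struct group_desc *descs[GROUP_COUNT] = { NULL, NULL, &g2, NULL };

	printf("usable group: %d\n", find_usable_group(descs, 3));
	return 0;
}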
fs/ext2/inode.c +10 −0
@@ -733,6 +733,16 @@ static int ext2_get_blocks(struct inode *inode,
 	}
 
 	if (IS_DAX(inode)) {
+		int i;
+
+		/*
+		 * We must unmap blocks before zeroing so that writeback cannot
+		 * overwrite zeros with stale data from block device page cache.
+		 */
+		for (i = 0; i < count; i++) {
+			unmap_underlying_metadata(inode->i_sb->s_bdev,
+					le32_to_cpu(chain[depth-1].key) + i);
+		}
 		/*
 		 * block must be initialised before we put it in the tree
 		 * so that it's not found by another thread before it's
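The added unmap_underlying_metadata() loop drops any stale buffer aliases of the newly allocated blocks from the block device's page cache before they are zeroed for DAX; otherwise a later writeback of such an alias could overwrite the zeros with stale data. A toy userspace model of that ordering, assuming a fake device[]/cache[] pair and a hypothetical unmap_alias() standing in for unmap_underlying_metadata():

#include <stdio.h>
#include <string.h>

#define NBLOCKS 8
#define BLKSZ   4

static char device[NBLOCKS][BLKSZ];	/* fake block device */
static char cache[NBLOCKS][BLKSZ];	/* fake bdev page cache */
static int cache_dirty[NBLOCKS];

/* Fake writeback: dirty cached aliases overwrite the device blocks. */
static void writeback(void)
{
	int b;

	for (b = 0; b < NBLOCKS; b++) {
		if (cache_dirty[b]) {
			memcpy(device[b], cache[b], BLKSZ);
			cache_dirty[b] = 0;
		}
	}
}

/* Analogue of unmap_underlying_metadata(): drop the cached alias so a
 * later writeback cannot resurrect stale contents. */
static void unmap_alias(int block)
{
	cache_dirty[block] = 0;
}

int main(void)
{
	memcpy(cache[3], "old", BLKSZ);	/* stale dirty alias of block 3 */
	cache_dirty[3] = 1;

	unmap_alias(3);			/* the fix: drop the alias first */
	memset(device[3], 0, BLKSZ);	/* then zero the reallocated block */
	writeback();			/* can no longer undo the zeroing */

	printf("block 3 after writeback: \"%s\"\n", device[3]);
	return 0;
}

Without the unmap_alias() call, writeback() would copy "old" back over the freshly zeroed block, which is exactly the corruption the kernel fix prevents.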
fs/quota/quota.c +12 −6
@@ -341,6 +341,7 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
 	struct qc_state state;
 	int ret;
 
+	memset(&state, 0, sizeof (struct qc_state));
 	ret = sb->s_qcop->get_state(sb, &state);
 	if (ret < 0)
 		return ret;
@@ -365,17 +366,19 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
 	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
 	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
-	if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+
+	/* Inodes may be allocated even if inactive; copy out if present */
+	if (state.s_state[USRQUOTA].ino) {
 		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
 		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
 		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
 	}
-	if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+	if (state.s_state[GRPQUOTA].ino) {
 		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
 		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
 		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
 	}
-	if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+	if (state.s_state[PRJQUOTA].ino) {
 		/*
 		 * Q_XGETQSTAT doesn't have room for both group and project
 		 * quotas.  So, allow the project quota values to be copied out
@@ -411,6 +414,7 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
 	struct qc_state state;
 	int ret;
 
+	memset(&state, 0, sizeof (struct qc_state));
 	ret = sb->s_qcop->get_state(sb, &state);
 	if (ret < 0)
 		return ret;
@@ -435,17 +439,19 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
 	fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
 	fqs->qs_bwarnlimit = state.s_state[type].spc_warnlimit;
 	fqs->qs_iwarnlimit = state.s_state[type].ino_warnlimit;
-	if (state.s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) {
+
+	/* Inodes may be allocated even if inactive; copy out if present */
+	if (state.s_state[USRQUOTA].ino) {
 		fqs->qs_uquota.qfs_ino = state.s_state[USRQUOTA].ino;
 		fqs->qs_uquota.qfs_nblks = state.s_state[USRQUOTA].blocks;
 		fqs->qs_uquota.qfs_nextents = state.s_state[USRQUOTA].nextents;
 	}
-	if (state.s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) {
+	if (state.s_state[GRPQUOTA].ino) {
 		fqs->qs_gquota.qfs_ino = state.s_state[GRPQUOTA].ino;
 		fqs->qs_gquota.qfs_nblks = state.s_state[GRPQUOTA].blocks;
 		fqs->qs_gquota.qfs_nextents = state.s_state[GRPQUOTA].nextents;
 	}
-	if (state.s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) {
+	if (state.s_state[PRJQUOTA].ino) {
 		fqs->qs_pquota.qfs_ino = state.s_state[PRJQUOTA].ino;
 		fqs->qs_pquota.qfs_nblks = state.s_state[PRJQUOTA].blocks;
 		fqs->qs_pquota.qfs_nextents = state.s_state[PRJQUOTA].nextents;
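Two things change in quota_getstate() and quota_getstatev(): the qc_state buffer is zeroed before ->get_state() fills it in, since a filesystem only touches the quota types it supports, and the inode fields are copied out whenever a quota inode exists (s_state[type].ino != 0) rather than only when accounting is enabled, because quota inodes may be allocated while quotas are inactive. A minimal userspace sketch of both points, with simplified stand-in structs and a hypothetical get_state():

#include <stdio.h>
#include <string.h>

enum { USRQUOTA, GRPQUOTA, PRJQUOTA, MAXQUOTAS };

/* Simplified stand-ins for the kernel's qc_type_state / qc_state. */
struct qc_type_state { unsigned long long ino, blocks; };
struct qc_state { struct qc_type_state s_state[MAXQUOTAS]; };

/* A filesystem's ->get_state() only fills in the quota types it knows
 * about; here only user quota. Without the caller's memset(), the group
 * and project slots would hold stale stack garbage. */
static int get_state(struct qc_state *state)
{
	state->s_state[USRQUOTA].ino = 131;
	state->s_state[USRQUOTA].blocks = 2;
	return 0;
}

int main(void)
{
	struct qc_state state;
	int type;

	memset(&state, 0, sizeof(state));	/* first half of the fix */
	if (get_state(&state) < 0)
		return 1;

	/* Second half: report the quota inode whenever it exists, not only
	 * when quota accounting is currently enabled. */
	for (type = 0; type < MAXQUOTAS; type++)
		if (state.s_state[type].ino)
			printf("type %d: ino %llu, blocks %llu\n", type,
			       state.s_state[type].ino,
			       state.s_state[type].blocks);
	return 0;
}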
fs/reiserfs/super.c +11 −1
@@ -190,7 +190,15 @@ static int remove_save_link_only(struct super_block *s,
 static int reiserfs_quota_on_mount(struct super_block *, int);
 #endif
 
-/* look for uncompleted unlinks and truncates and complete them */
+/*
+ * Look for uncompleted unlinks and truncates and complete them
+ *
+ * Called with superblock write locked.  If quotas are enabled, we have to
+ * release/retake lest we call dquot_quota_on_mount(), proceed to
+ * schedule_on_each_cpu() in invalidate_bdev() and deadlock waiting for the per
+ * cpu worklets to complete flush_async_commits() that in turn wait for the
+ * superblock write lock.
+ */
 static int finish_unfinished(struct super_block *s)
 {
 	INITIALIZE_PATH(path);
@@ -237,7 +245,9 @@ static int finish_unfinished(struct super_block *s)
 				quota_enabled[i] = 0;
 				continue;
 			}
+			reiserfs_write_unlock(s);
 			ret = reiserfs_quota_on_mount(s, i);
+			reiserfs_write_lock(s);
 			if (ret < 0)
 				reiserfs_warning(s, "reiserfs-2500",
 						 "cannot turn on journaled "
fs/udf/file.c +16 −2
@@ -94,7 +94,7 @@ static int udf_adinicb_write_begin(struct file *file,
 		return -ENOMEM;
 	*pagep = page;
 
-	if (!PageUptodate(page) && len != PAGE_SIZE)
+	if (!PageUptodate(page))
 		__udf_adinicb_readpage(page);
 	return 0;
 }
@@ -105,11 +105,25 @@ static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	return 0;
 }
 
+static int udf_adinicb_write_end(struct file *file, struct address_space *mapping,
+				 loff_t pos, unsigned len, unsigned copied,
+				 struct page *page, void *fsdata)
+{
+	struct inode *inode = page->mapping->host;
+	loff_t last_pos = pos + copied;
+	if (last_pos > inode->i_size)
+		i_size_write(inode, last_pos);
+	set_page_dirty(page);
+	unlock_page(page);
+	put_page(page);
+	return copied;
+}
+
 const struct address_space_operations udf_adinicb_aops = {
 	.readpage	= udf_adinicb_readpage,
 	.writepage	= udf_adinicb_writepage,
 	.write_begin	= udf_adinicb_write_begin,
-	.write_end	= simple_write_end,
+	.write_end	= udf_adinicb_write_end,
 	.direct_IO	= udf_adinicb_direct_IO,
 };
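With write_begin now always reading the page in, the page is uptodate before any in-ICB write starts, so the generic simple_write_end() zeroing of the non-copied part of a partially uptodate page is unnecessary; the dedicated udf_adinicb_write_end() only has to grow i_size, dirty the page and release it. A tiny userspace sketch of just the size-update rule; write_end_size() here is a hypothetical pure function, not the kernel helper:

#include <stdio.h>

/* After 'copied' bytes landed at offset 'pos', the file size grows only
 * if the write ended past the current EOF; writes entirely inside the
 * file leave it unchanged. */
static long long write_end_size(long long i_size, long long pos,
				unsigned copied)
{
	long long last_pos = pos + (long long)copied;

	return last_pos > i_size ? last_pos : i_size;
}

int main(void)
{
	printf("%lld\n", write_end_size(100, 90, 20));	/* extends to 110 */
	printf("%lld\n", write_end_size(100, 10, 20));	/* stays at 100 */
	return 0;
}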