Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e93b1cc8 authored by Linus Torvalds
Browse files
Pull quota, fsnotify and ext2 updates from Jan Kara:
 "Changes to locking of some quota operations from dedicated quota mutex
  to s_umount semaphore, a fsnotify fix and a simple ext2 fix"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  quota: Fix bogus warning in dquot_disable()
  fsnotify: Fix possible use-after-free in inode iteration on umount
  ext2: reject inodes with negative size
  quota: Remove dqonoff_mutex
  ocfs2: Use s_umount for quota recovery protection
  quota: Remove dqonoff_mutex from dquot_scan_active()
  ocfs2: Protect periodic quota syncing with s_umount semaphore
  quota: Use s_umount protection for quota operations
  quota: Hold s_umount in exclusive mode when enabling / disabling quotas
  fs: Provide function to get superblock with exclusive s_umount
parents 45d36906 2700e606
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1478,6 +1478,10 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
		inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	else
		ei->i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	if (i_size_read(inode) < 0) {
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_dtime = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_state = 0;
+9 −36
Original line number Diff line number Diff line
@@ -150,12 +150,10 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
 */
void fsnotify_unmount_inodes(struct super_block *sb)
{
	struct inode *inode, *next_i, *need_iput = NULL;
	struct inode *inode, *iput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next_i, &sb->s_inodes, i_sb_list) {
		struct inode *need_iput_tmp;

	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
@@ -178,49 +176,24 @@ void fsnotify_unmount_inodes(struct super_block *sb)
			continue;
		}

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
		__iget(inode);
		else
			need_iput_tmp = NULL;
		spin_unlock(&inode->i_lock);

		/* In case the dropping of a reference would nuke next_i. */
		while (&next_i->i_sb_list != &sb->s_inodes) {
			spin_lock(&next_i->i_lock);
			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
						atomic_read(&next_i->i_count)) {
				__iget(next_i);
				need_iput = next_i;
				spin_unlock(&next_i->i_lock);
				break;
			}
			spin_unlock(&next_i->i_lock);
			next_i = list_next_entry(next_i, i_sb_list);
		}

		/*
		 * We can safely drop s_inode_list_lock here because either
		 * we actually hold references on both inode and next_i or
		 * end of list.  Also no new inodes will be added since the
		 * umount has begun.
		 */
		spin_unlock(&sb->s_inode_list_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);
		if (iput_inode)
			iput(iput_inode);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);
		iput_inode = inode;

		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);

	if (iput_inode)
		iput(iput_inode);
}
+9 −1
Original line number Diff line number Diff line
@@ -634,7 +634,15 @@ static void qsync_work_fn(struct work_struct *work)
						      dqi_sync_work.work);
	struct super_block *sb = oinfo->dqi_gqinode->i_sb;

	/*
	 * We have to be careful here not to deadlock on s_umount as umount
	 * disabling quotas may be in progress and it waits for this work to
	 * complete. If trylock fails, we'll do the sync next time...
	 */
	if (down_read_trylock(&sb->s_umount)) {
		dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
		up_read(&sb->s_umount);
	}
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));
}
+7 −4
Original line number Diff line number Diff line
@@ -454,7 +454,7 @@ struct ocfs2_quota_recovery *ocfs2_begin_quota_recovery(
/* Sync changes in local quota file into global quota file and
 * reinitialize local quota file.
 * The function expects local quota file to be already locked and
 * dqonoff_mutex locked. */
 * s_umount locked in shared mode. */
static int ocfs2_recover_local_quota_file(struct inode *lqinode,
					  int type,
					  struct ocfs2_quota_recovery *rec)
@@ -597,7 +597,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
	printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
	       "slot %u\n", osb->dev_str, slot_num);

	mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
	down_read(&sb->s_umount);
	for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
		if (list_empty(&(rec->r_list[type])))
			continue;
@@ -674,7 +674,7 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
			break;
	}
out:
	mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
	up_read(&sb->s_umount);
	kfree(rec);
	return status;
}
@@ -840,7 +840,10 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
	}
	ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);

	/* dqonoff_mutex protects us against racing with recovery thread... */
	/*
	 * s_umount held in exclusive mode protects us against racing with
	 * recovery thread...
	 */
	if (oinfo->dqi_rec) {
		ocfs2_free_quota_recovery(oinfo->dqi_rec);
		mark_clean = 0;
+0 −1
Original line number Diff line number Diff line
@@ -985,7 +985,6 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
	for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
		if (!sb_has_quota_loaded(sb, type))
			continue;
		/* Cancel periodic syncing before we grab dqonoff_mutex */
		oinfo = sb_dqinfo(sb, type)->dqi_priv;
		cancel_delayed_work_sync(&oinfo->dqi_sync_work);
		inode = igrab(sb->s_dquot.files[type]);
Loading