
Commit 268157ba authored by Jan Kara

quota: Coding style fixes



Wrap long lines, remove assignments from conditions, rewrite two
overcomplicated for loops.

Signed-off-by: Jan Kara <jack@suse.cz>
parent 7a2435d8
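
The transformations in this patch are purely mechanical. As an illustration only — none of the code below is from the patch; struct fmt and find_fmt() are hypothetical stand-ins for quota_format_type and find_quota_format() — the sketch shows the patterns the commit message names: a loop header wrapped across lines with an explicit lone ';' body, and an assignment moved out of an if condition. Both over-80-column lines and assignments inside if conditions are among the things scripts/checkpatch.pl flags, which is why a style-only patch like this touches many call sites at once.

	/* Illustration only -- compiles as plain C99 userspace code. */
	#include <stdio.h>

	struct fmt {
		int id;
		struct fmt *next;
	};

	static struct fmt *find_fmt(struct fmt *list, int id)
	{
		struct fmt *f;

		/* Wrapped loop condition with an explicit empty body,
		 * mirroring the rewrites in find_quota_format(). */
		for (f = list; f && f->id != id;
		     f = f->next)
			;
		return f;
	}

	int main(void)
	{
		struct fmt b = { .id = 2, .next = NULL };
		struct fmt a = { .id = 1, .next = &b };
		struct fmt *f;

		/* Before: if (!(f = find_fmt(&a, 2))) ...
		 * After: the assignment is pulled out of the condition. */
		f = find_fmt(&a, 2);
		if (!f)
			return 1;
		printf("found format id %d\n", f->id);
		return 0;
	}
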
+114 −67
@@ -156,7 +156,9 @@ void unregister_quota_format(struct quota_format_type *fmt)
	struct quota_format_type **actqf;

	spin_lock(&dq_list_lock);
	for (actqf = &quota_formats; *actqf && *actqf != fmt; actqf = &(*actqf)->qf_next);
	for (actqf = &quota_formats; *actqf && *actqf != fmt;
	     actqf = &(*actqf)->qf_next)
		;
	if (*actqf)
		*actqf = (*actqf)->qf_next;
	spin_unlock(&dq_list_lock);
@@ -168,18 +170,25 @@ static struct quota_format_type *find_quota_format(int id)
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		spin_unlock(&dq_list_lock);
		
		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
@@ -234,7 +243,8 @@ hashfn(const struct super_block *sb, unsigned int id, int type)
 */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	struct hlist_head *head;
	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id, dquot->dq_type);
	hlist_add_head(&dquot->dq_hash, head);
}

@@ -251,7 +261,8 @@ static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,

	hlist_for_each (node, dquot_hash+hashent) {
		dquot = hlist_entry(node, struct dquot, dq_hash);
		if (dquot->dq_sb == sb && dquot->dq_id == id && dquot->dq_type == type)
		if (dquot->dq_sb == sb && dquot->dq_id == id &&
		    dquot->dq_type == type)
			return dquot;
	}
	return NULL;
@@ -351,8 +362,10 @@ int dquot_acquire(struct dquot *dquot)
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
@@ -387,8 +400,10 @@ int dquot_commit(struct dquot *dquot)
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		ret = dqopt->ops[dquot->dq_type]->commit_dqblk(dquot);
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
@@ -414,8 +429,10 @@ int dquot_release(struct dquot *dquot)
	if (dqopt->ops[dquot->dq_type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_type]))
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(dquot->dq_sb, dquot->dq_type);
		if (info_dirty(&dqopt->info[dquot->dq_type])) {
			ret2 = dqopt->ops[dquot->dq_type]->write_file_info(
						dquot->dq_sb, dquot->dq_type);
		}
		if (ret >= 0)
			ret = ret2;
	}
@@ -543,7 +560,8 @@ int vfs_quota_sync(struct super_block *sb, int type)
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot, dq_dirty);
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);
			/* Dirty and inactive can be only bad dquot... */
			if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
				clear_dquot_dirty(dquot);
@@ -763,11 +781,12 @@ struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
		dqstats.lookups++;
		spin_unlock(&dq_list_lock);
	}
	/* Wait for dq_lock - after this we know that either dquot_release() is already
	 * finished or it will be canceled due to dq_count > 1 test */
	/* Wait for dq_lock - after this we know that either dquot_release() is
	 * already finished or it will be canceled due to dq_count > 1 test */
	wait_on_dquot(dquot);
	/* Read the dquot and instantiate it (everything done only if needed) */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
	/* Read the dquot / allocate space in quota file */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) &&
	    sb->dq_op->acquire_dquot(dquot) < 0) {
		dqput(dquot);
		dquot = NULL;
		goto out;
@@ -828,7 +847,10 @@ static void add_dquot_ref(struct super_block *sb, int type)
	iput(old_inode);
}

/* Return 0 if dqput() won't block (note that 1 doesn't necessarily mean blocking) */
/*
 * Return 0 if dqput() won't block.
 * (note that 1 doesn't necessarily mean blocking)
 */
static inline int dqput_blocks(struct dquot *dquot)
{
	if (atomic_read(&dquot->dq_count) <= 1)
@@ -836,8 +858,11 @@ static inline int dqput_blocks(struct dquot *dquot)
	return 0;
}

/* Remove references to dquots from inode - add dquot to list for freeing if needed */
/* We can't race with anybody because we hold dqptr_sem for writing... */
/*
 * Remove references to dquots from inode and add dquot to list for freeing
 * if we have the last reference to dquot
 * We can't race with anybody because we hold dqptr_sem for writing...
 */
static int remove_inode_dquot_ref(struct inode *inode, int type,
				  struct list_head *tofree_head)
{
@@ -851,7 +876,9 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
				printk(KERN_WARNING "VFS: Adding dquot with dq_count %d to dispose list.\n", atomic_read(&dquot->dq_count));
#endif
			spin_lock(&dq_list_lock);
			list_add(&dquot->dq_free, tofree_head);	/* As dquot must have currently users it can't be on the free list... */
			/* As dquot must have currently users it can't be on
			 * the free list... */
			list_add(&dquot->dq_free, tofree_head);
			spin_unlock(&dq_list_lock);
			return 1;
		}
@@ -861,19 +888,22 @@ static int remove_inode_dquot_ref(struct inode *inode, int type,
	return 0;
}

/* Free list of dquots - called from inode.c */
/* dquots are removed from inodes, no new references can be got so we are the only ones holding reference */
/*
 * Free list of dquots
 * Dquots are removed from inodes and no new references can be got so we are
 * the only ones holding reference
 */
static void put_dquot_list(struct list_head *tofree_head)
{
	struct list_head *act_head;
	struct dquot *dquot;

	act_head = tofree_head->next;
	/* So now we have dquots on the list... Just free them */
	while (act_head != tofree_head) {
		dquot = list_entry(act_head, struct dquot, dq_free);
		act_head = act_head->next;
		list_del_init(&dquot->dq_free);	/* Remove dquot from the list so we won't have problems... */
		/* Remove dquot from the list so we won't have problems... */
		list_del_init(&dquot->dq_free);
		dqput(dquot);
	}
}
@@ -1131,37 +1161,42 @@ static int ignore_hardlimit(struct dquot *dquot)
	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];

	return capable(CAP_SYS_RESOURCE) &&
	    (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD || !(info->dqi_flags & V1_DQF_RSQUASH));
	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
		!(info->dqi_flags & V1_DQF_RSQUASH));
}

/* needs dq_data_lock */
static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
{
	qsize_t newinodes = dquot->dq_dqb.dqb_curinodes + inodes;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

	if (dquot->dq_dqb.dqb_ihardlimit &&
	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_ihardlimit &&
	    newinodes > dquot->dq_dqb.dqb_ihardlimit &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_IHARDWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime && get_seconds() >= dquot->dq_dqb.dqb_itime &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime &&
	    get_seconds() >= dquot->dq_dqb.dqb_itime &&
            !ignore_hardlimit(dquot)) {
		*warntype = QUOTA_NL_ISOFTLONGWARN;
		return NO_QUOTA;
	}

	if (dquot->dq_dqb.dqb_isoftlimit &&
	   (dquot->dq_dqb.dqb_curinodes + inodes) > dquot->dq_dqb.dqb_isoftlimit &&
	    newinodes > dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_itime == 0) {
		*warntype = QUOTA_NL_ISOFTWARN;
		dquot->dq_dqb.dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
		dquot->dq_dqb.dqb_itime = get_seconds() +
		    sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
	}

	return QUOTA_OK;
@@ -1171,9 +1206,10 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
	qsize_t tspace;
	struct super_block *sb = dquot->dq_sb;

	*warntype = QUOTA_NL_NOWARN;
	if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
	if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) ||
	    test_bit(DQ_FAKE_B, &dquot->dq_flags))
		return QUOTA_OK;

@@ -1190,7 +1226,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war

	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    tspace > dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
	    dquot->dq_dqb.dqb_btime &&
	    get_seconds() >= dquot->dq_dqb.dqb_btime &&
            !ignore_hardlimit(dquot)) {
		if (!prealloc)
			*warntype = QUOTA_NL_BSOFTLONGWARN;
@@ -1202,7 +1239,8 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
	    dquot->dq_dqb.dqb_btime == 0) {
		if (!prealloc) {
			*warntype = QUOTA_NL_BSOFTWARN;
			dquot->dq_dqb.dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
			dquot->dq_dqb.dqb_btime = get_seconds() +
			    sb_dqopt(sb)->info[dquot->dq_type].dqi_bgrace;
		}
		else
			/*
@@ -1217,15 +1255,18 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war

static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type))
		return QUOTA_NL_NOWARN;

	if (dquot->dq_dqb.dqb_curinodes - inodes <= dquot->dq_dqb.dqb_isoftlimit)
	newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
	if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
	    dquot->dq_dqb.dqb_curinodes - inodes < dquot->dq_dqb.dqb_ihardlimit)
	    newinodes < dquot->dq_dqb.dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}
@@ -1466,7 +1507,8 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!inode->i_dquot[cnt])
			continue;
		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) == NO_QUOTA)
		if (check_idq(inode->i_dquot[cnt], number, warntype+cnt)
		    == NO_QUOTA)
			goto warn_put_all;
	}

@@ -1673,19 +1715,13 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
		transfer_from[cnt] = NULL;
		transfer_to[cnt] = NULL;
		warntype_to[cnt] = QUOTA_NL_NOWARN;
		switch (cnt) {
			case USRQUOTA:
				if (!chuid)
					continue;
				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_uid, cnt);
				break;
			case GRPQUOTA:
				if (!chgid)
					continue;
				transfer_to[cnt] = dqget(inode->i_sb, iattr->ia_gid, cnt);
				break;
		}
	}
	if (chuid)
		transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid,
					      USRQUOTA);
	if (chgid)
		transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid,
					      GRPQUOTA);

	down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
	/* Now recheck reliably when holding dqptr_sem */
@@ -1881,8 +1917,8 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we should be only
		 * users of the info. No locks needed.
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
@@ -1920,10 +1956,12 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
			/* If quota was reenabled in the meantime, we have
			 * nothing to do */
			if (!sb_has_quota_loaded(sb, cnt)) {
				mutex_lock_nested(&toputinode[cnt]->i_mutex, I_MUTEX_QUOTA);
				mutex_lock_nested(&toputinode[cnt]->i_mutex,
						  I_MUTEX_QUOTA);
				toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
				  S_NOATIME | S_NOQUOTA);
				truncate_inode_pages(&toputinode[cnt]->i_data, 0);
				truncate_inode_pages(&toputinode[cnt]->i_data,
						     0);
				mutex_unlock(&toputinode[cnt]->i_mutex);
				mark_inode_dirty(toputinode[cnt]);
			}
@@ -2013,7 +2051,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
		 * possible) Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		down_write(&dqopt->dqptr_sem);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | S_NOQUOTA);
		oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
					     S_NOQUOTA);
		inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
		up_write(&dqopt->dqptr_sem);
		sb->dq_op->drop(inode);
@@ -2032,7 +2071,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	mutex_lock(&dqopt->dqio_mutex);
	if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0) {
		mutex_unlock(&dqopt->dqio_mutex);
		goto out_file_init;
	}
@@ -2254,7 +2294,8 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
	spin_unlock(&dq_data_lock);
}

int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
int vfs_get_dqblk(struct super_block *sb, int type, qid_t id,
		  struct if_dqblk *di)
{
	struct dquot *dquot;

@@ -2318,22 +2359,25 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit || dm->dqb_curspace < dm->dqb_bsoftlimit) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_BTIME))	/* Set grace only if user hasn't provided his own... */
		} else if (!(di->dqb_valid & QIF_BTIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		}
		else if (!(di->dqb_valid & QIF_ITIME))	/* Set grace only if user hasn't provided his own... */
		} else if (!(di->dqb_valid & QIF_ITIME))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -2343,7 +2387,8 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
	return 0;
}

int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
int vfs_set_dqblk(struct super_block *sb, int type, qid_t id,
		  struct if_dqblk *di)
{
	struct dquot *dquot;
	int rc;
@@ -2400,7 +2445,8 @@ int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
	if (ii->dqi_valid & IIF_IGRACE)
		mi->dqi_igrace = ii->dqi_igrace;
	if (ii->dqi_valid & IIF_FLAGS)
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) | (ii->dqi_flags & DQF_MASK);
		mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
				(ii->dqi_flags & DQF_MASK);
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
@@ -2559,7 +2605,8 @@ static int __init dquot_init(void)

#ifdef CONFIG_QUOTA_NETLINK_INTERFACE
	if (genl_register_family(&quota_genl_family) != 0)
		printk(KERN_ERR "VFS: Failed to create quota netlink interface.\n");
		printk(KERN_ERR
		       "VFS: Failed to create quota netlink interface.\n");
#endif

	return 0;
+23 −12
@@ -20,7 +20,8 @@
#include <linux/types.h>

/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd,
				  qid_t id)
{
	if (type >= MAXQUOTAS)
		return -EINVAL;
@@ -72,7 +73,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
		case Q_SETINFO:
		case Q_SETQUOTA:
		case Q_GETQUOTA:
			/* This is just informative test so we are satisfied without a lock */
			/* This is just an informative test so we are satisfied
			 * without the lock */
			if (!sb_has_quota_active(sb, type))
				return -ESRCH;
	}
@@ -92,7 +94,8 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
}

/* Check validity of XFS Quota Manager commands */
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd,
			      qid_t id)
{
	if (type >= XQM_MAXQUOTAS)
		return -EINVAL;
@@ -142,7 +145,8 @@ static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t i
	return 0;
}

static int check_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
static int check_quotactl_valid(struct super_block *sb, int type, int cmd,
				qid_t id)
{
	int error;

@@ -180,7 +184,8 @@ static void quota_sync_sb(struct super_block *sb, int type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, I_MUTEX_QUOTA);
		mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex,
				  I_MUTEX_QUOTA);
		truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0);
		mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
	}
@@ -200,7 +205,8 @@ void sync_dquots(struct super_block *sb, int type)
	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		/* This test just improves performance so it needn't be reliable... */
		/* This test just improves performance so it needn't be
		 * reliable... */
		for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
			if (type != -1 && type != cnt)
				continue;
@@ -227,7 +233,8 @@ void sync_dquots(struct super_block *sb, int type)
}

/* Copy parameters and call proper function */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void __user *addr)
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr)
{
	int ret;

@@ -235,7 +242,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
		case Q_QUOTAON: {
			char *pathname;

			if (IS_ERR(pathname = getname(addr)))
			pathname = getname(addr);
			if (IS_ERR(pathname))
				return PTR_ERR(pathname);
			ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
			putname(pathname);
@@ -261,7 +269,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
		case Q_GETINFO: {
			struct if_dqinfo info;

			if ((ret = sb->s_qcop->get_info(sb, type, &info)))
			ret = sb->s_qcop->get_info(sb, type, &info);
			if (ret)
				return ret;
			if (copy_to_user(addr, &info, sizeof(info)))
				return -EFAULT;
@@ -277,7 +286,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
		case Q_GETQUOTA: {
			struct if_dqblk idq;

			if ((ret = sb->s_qcop->get_dqblk(sb, type, id, &idq)))
			ret = sb->s_qcop->get_dqblk(sb, type, id, &idq);
			if (ret)
				return ret;
			if (copy_to_user(addr, &idq, sizeof(idq)))
				return -EFAULT;
@@ -322,7 +332,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
		case Q_XGETQUOTA: {
			struct fs_disk_quota fdq;

			if ((ret = sb->s_qcop->get_xquota(sb, type, id, &fdq)))
			ret = sb->s_qcop->get_xquota(sb, type, id, &fdq);
			if (ret)
				return ret;
			if (copy_to_user(addr, &fdq, sizeof(fdq)))
				return -EFAULT;
+27 −15
@@ -43,7 +43,8 @@ static char *getdqbuf(size_t size)
{
	char *buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		printk(KERN_WARNING "VFS: Not enough memory for quota buffers.\n");
		printk(KERN_WARNING
		       "VFS: Not enough memory for quota buffers.\n");
	return buf;
}

@@ -113,7 +114,8 @@ static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk)
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
@@ -150,7 +152,9 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint bl
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		printk(KERN_ERR "VFS: Can't write block (%u) with free entries.\n", blk);
		printk(KERN_ERR
		       "VFS: Can't write block (%u) with free entries.\n",
		       blk);
	return 0;
out_buf:
	kfree(tmpbuf);
@@ -158,7 +162,8 @@ static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint bl
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, uint blk)
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
@@ -230,7 +235,8 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as block is already zeroed and entry list is empty... */
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_type);
	}
@@ -246,10 +252,12 @@ static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	     i < qtree_dqstr_in_blk(info) && !qtree_entry_unused(info, ddquot);
	     i++, ddquot += info->dqi_entry_size)
		;
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		printk(KERN_ERR "VFS: find_free_dqentry(): Data block full "
@@ -340,7 +348,8 @@ static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
}

/*
 *	We don't have to be afraid of deadlocks as we never have quotas on quota files...
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
@@ -515,10 +524,12 @@ static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
		printk(KERN_ERR "VFS: Can't read quota tree block %u.\n", blk);
		goto out_buf;
	}
	for (i = 0, ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	     i < qtree_dqstr_in_blk(info) && !info->dqi_ops->is_id(ddquot, dquot);
	     i++, ddquot += info->dqi_entry_size)
		;
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		printk(KERN_ERR "VFS: Quota for id %u referenced "
		  "but not present.\n", dquot->dq_id);
@@ -632,7 +643,8 @@ EXPORT_SYMBOL(qtree_read_dquot);
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) && !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
+32 −16

File changed, but the diff preview is collapsed (size limit exceeded).

+2 −1
@@ -54,7 +54,8 @@ static int v2_check_quota_file(struct super_block *sb, int type)
	static const uint quota_magics[] = V2_INITQMAGICS;
	static const uint quota_versions[] = V2_INITQVERSIONS;
 
	size = sb->s_op->quota_read(sb, type, (char *)&dqhead, sizeof(struct v2_disk_dqheader), 0);
	size = sb->s_op->quota_read(sb, type, (char *)&dqhead,
				    sizeof(struct v2_disk_dqheader), 0);
	if (size != sizeof(struct v2_disk_dqheader)) {
		printk("quota_v2: failed read expected=%zd got=%zd\n",
			sizeof(struct v2_disk_dqheader), size);