Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 92bd85fa authored by Jan Kara's avatar Jan Kara
Browse files

Merge branch 'xfs-get-next-dquot-4.6' of...

Merge branch 'xfs-get-next-dquot-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs into for_next
parents 388f7b1d be607946
Loading
Loading
Loading
Loading
+64 −2
Original line number Diff line number Diff line
@@ -79,7 +79,7 @@ unsigned int qtype_enforce_flag(int type)
	return 0;
}

static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
static int quota_quotaon(struct super_block *sb, int type, qid_t id,
		         struct path *path)
{
	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
@@ -222,6 +222,34 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
	return 0;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ESRCH via ->get_nextdqblk
 */
static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct if_nextdqblk idq;
	struct qc_dqblk fdq;
	struct kqid qid;
	int ret;

	/* Filesystems that cannot iterate dquots don't support this call. */
	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;

	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;

	/* qid is updated in place to the id actually found, if any. */
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
	if (ret)
		return ret;

	/* struct if_nextdqblk is a superset of struct if_dqblk */
	copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
	idq.dqb_id = from_kqid(current_user_ns(), qid);

	return copy_to_user(addr, &idq, sizeof(idq)) ? -EFAULT : 0;
}

static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
	dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
@@ -625,6 +653,34 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
	return ret;
}

/*
 * Return quota for next active quota >= this id, if any exists,
 * otherwise return -ESRCH via ->get_nextdqblk.
 */
static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
			    void __user *addr)
{
	struct fs_disk_quota fdq;
	struct qc_dqblk qdq;
	struct kqid qid;
	qid_t id_out;
	int ret;

	/* Iteration requires filesystem support for ->get_nextdqblk. */
	if (!sb->s_qcop->get_nextdqblk)
		return -ENOSYS;
	qid = make_kqid(current_user_ns(), type, id);
	if (!qid_valid(qid))
		return -EINVAL;
	/* qid is updated in place to the id of the dquot actually found. */
	ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
	if (ret)
		return ret;
	id_out = from_kqid(current_user_ns(), qid);
	copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
	if (copy_to_user(addr, &fdq, sizeof(fdq)))
		return -EFAULT;
	/* ret is known to be 0 here; return it explicitly for clarity
	 * and consistency with quota_getnextquota(). */
	return 0;
}

static int quota_rmxquota(struct super_block *sb, void __user *addr)
{
	__u32 flags;
@@ -659,7 +715,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, cmd, id, path);
		return quota_quotaon(sb, type, id, path);
	case Q_QUOTAOFF:
		return quota_quotaoff(sb, type);
	case Q_GETFMT:
@@ -670,6 +726,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_GETNEXTQUOTA:
		return quota_getnextquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
@@ -690,6 +748,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XGETNEXTQUOTA:
		return quota_getnextxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
@@ -708,10 +768,12 @@ static int quotactl_cmd_write(int cmd)
	switch (cmd) {
	case Q_GETFMT:
	case Q_GETINFO:
	case Q_GETNEXTQUOTA:
	case Q_SYNC:
	case Q_XGETQSTAT:
	case Q_XGETQSTATV:
	case Q_XGETQUOTA:
	case Q_XGETNEXTQUOTA:
	case Q_XQUOTASYNC:
		return 0;
	}
+2 −1
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ typedef __uint16_t xfs_qwarncnt_t;
#define XFS_DQ_PROJ		0x0002		/* project quota */
#define XFS_DQ_GROUP		0x0004		/* a group quota */
#define XFS_DQ_DIRTY		0x0008		/* dquot is dirty */
#define XFS_DQ_FREEING		0x0010		/* dquot is beeing torn down */
#define XFS_DQ_FREEING		0x0010		/* dquot is being torn down */

#define XFS_DQ_ALLTYPES		(XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)

@@ -116,6 +116,7 @@ typedef __uint16_t xfs_qwarncnt_t;
#define XFS_QMOPT_DQREPAIR	0x0001000 /* repair dquot if damaged */
#define XFS_QMOPT_GQUOTA	0x0002000 /* group dquot requested */
#define XFS_QMOPT_ENOSPC	0x0004000 /* enospc instead of edquot (prj) */
#define XFS_QMOPT_DQNEXT	0x0008000 /* return next dquot >= this ID */

/*
 * flags to xfs_trans_mod_dquot to indicate which field needs to be
+114 −15
Original line number Diff line number Diff line
@@ -92,26 +92,28 @@ xfs_qm_adjust_dqlimits(
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_def_quota	*defq;
	int			prealloc = 0;

	ASSERT(d->d_id);
	defq = xfs_get_defquota(dq, q);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
	if (defq->bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
	if (defq->bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
	if (defq->isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
	if (defq->ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
@@ -232,7 +234,8 @@ xfs_qm_init_dquot_blk(
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;
	xfs_dqid_t	curid;
	int		i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));
@@ -243,7 +246,6 @@ xfs_qm_init_dquot_blk(
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -464,12 +466,13 @@ xfs_qm_dqtobp(
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_inode	*quotip;
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
@@ -684,6 +687,56 @@ xfs_qm_dqread(
	return error;
}

/*
 * Advance to the next id in the current chunk, or if at the
 * end of the chunk, skip ahead to first id in next allocated chunk
 * using the SEEK_DATA interface.
 *
 * @mp:   mount point whose quota file is searched
 * @type: quota type (selects user/group/project quota inode)
 * @id:   in/out - current id on entry, next candidate id on success
 * @eof:  end offset of the quota file, bounds the SEEK_DATA scan
 *
 * Returns 0 with *id updated, -ENOENT when no further dquots exist,
 * or a negative errno on error.
 */
int
xfs_dq_get_next_id(
	xfs_mount_t		*mp,
	uint			type,
	xfs_dqid_t		*id,
	loff_t			eof)
{
	struct xfs_inode	*quotip;
	xfs_fsblock_t		start;
	loff_t			offset;
	uint			lock;
	xfs_dqid_t		next_id;
	int			error = 0;

	/* Simple advance */
	next_id = *id + 1;

	/* If new ID is within the current chunk, advancing it sufficed */
	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
		*id = next_id;
		return 0;
	}

	/* Nope, next_id is now past the current chunk, so find the next one */
	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;

	quotip = xfs_quota_inode(mp, type);
	lock = xfs_ilock_data_map_shared(quotip);

	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
				      eof, SEEK_DATA);
	if (offset < 0)
		error = offset;

	xfs_iunlock(quotip, lock);

	/* -ENXIO is essentially "no more data" */
	if (error)
		return (error == -ENXIO ? -ENOENT : error);

	/* Convert next data offset back to a quota id */
	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
	return 0;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
 * a locked dquot, doing an allocation (if requested) as needed.
@@ -704,6 +757,7 @@ xfs_qm_dqget(
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	loff_t			eof = 0;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -731,6 +785,21 @@ xfs_qm_dqget(
	}
#endif

	/* Get the end of the quota file if we need it */
	if (flags & XFS_QMOPT_DQNEXT) {
		struct xfs_inode	*quotip;
		xfs_fileoff_t		last;
		uint			lock_mode;

		quotip = xfs_quota_inode(mp, type);
		lock_mode = xfs_ilock_data_map_shared(quotip);
		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
		xfs_iunlock(quotip, lock_mode);
		if (error)
			return error;
		eof = XFS_FSB_TO_B(mp, last);
	}

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
@@ -744,6 +813,18 @@ xfs_qm_dqget(
			goto restart;
		}

		/* uninit / unused quota found in radix tree, keep looking  */
		if (flags & XFS_QMOPT_DQNEXT) {
			if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
				xfs_dqunlock(dqp);
				mutex_unlock(&qi->qi_tree_lock);
				error = xfs_dq_get_next_id(mp, type, &id, eof);
				if (error)
					return error;
				goto restart;
			}
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

@@ -770,6 +851,13 @@ xfs_qm_dqget(
	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	/* If we are asked to find next active id, keep looking */
	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
		error = xfs_dq_get_next_id(mp, type, &id, eof);
		if (!error)
			goto restart;
	}

	if (error)
		return error;

@@ -820,6 +908,17 @@ xfs_qm_dqget(
	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

	/* If we are asked to find next active id, keep looking */
	if (flags & XFS_QMOPT_DQNEXT) {
		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
			xfs_qm_dqput(dqp);
			error = xfs_dq_get_next_id(mp, type, &id, eof);
			if (error)
				return error;
			goto restart;
		}
	}

 dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
+57 −25
Original line number Diff line number Diff line
@@ -1337,31 +1337,31 @@ xfs_find_get_desired_pgoff(
	return found;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
/*
 * caller must lock inode with xfs_ilock_data_map_shared,
 * can we craft an appropriate ASSERT?
 *
 * end is because the VFS-level lseek interface is defined such that any
 * offset past i_size shall return -ENXIO, but we use this for quota code
 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	xfs_filblks_t		lastbno;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
	if (start >= end) {
		error = -ENXIO;
		goto out_unlock;
		goto out_error;
	}

	/*
@@ -1369,22 +1369,22 @@ xfs_seek_hole_data(
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
@@ -1426,7 +1426,7 @@ xfs_seek_hole_data(
			 * hole at the end of any file).
		 	 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				offset = end;
				break;
			}
			/*
@@ -1434,7 +1434,7 @@ xfs_seek_hole_data(
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
			goto out_error;
		}

		ASSERT(i > 1);
@@ -1445,14 +1445,14 @@ xfs_seek_hole_data(
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
			goto out_error;
		}
	}

@@ -1464,7 +1464,39 @@ xfs_seek_hole_data(
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}

STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
+2 −0
Original line number Diff line number Diff line
@@ -437,6 +437,8 @@ int xfs_update_prealloc_flags(struct xfs_inode *ip,
int	xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
		     xfs_fsize_t isize, bool *did_zeroing);
int	xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
loff_t	__xfs_seek_hole_data(struct inode *inode, loff_t start,
			     loff_t eof, int whence);


/* from xfs_iops.c */
Loading