Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a694ad94 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: fix the logspace waiting algorithm
  xfs: fix nfs export of 64-bit inodes numbers on 32-bit kernels
  xfs: fix allocation length overflow in xfs_bmapi_write()
parents 1c70132f 9f9c19ec
Loading
Loading
Loading
Loading
+19 −1
Original line number Original line Diff line number Diff line
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
	int		tryagain;
	int		tryagain;
	int		error;
	int		error;


	ASSERT(ap->length);

	mp = ap->ip->i_mount;
	mp = ap->ip->i_mount;
	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
	align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
	if (unlikely(align)) {
	if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
	int			error;
	int			error;
	int			rt;
	int			rt;


	ASSERT(bma->length > 0);

	rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
	rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);


	/*
	/*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(tp != NULL);
	ASSERT(tp != NULL);
	ASSERT(len > 0);


	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
		XFS_ATTR_FORK : XFS_DATA_FORK;
		XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
			bma.eof = eof;
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.wasdel = wasdelay;
			bma.length = len;
			bma.offset = bno;
			bma.offset = bno;


			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma, flags);
			error = xfs_bmapi_allocate(&bma, flags);
			if (error)
			if (error)
				goto error0;
				goto error0;
+4 −4
Original line number Original line Diff line number Diff line
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
	switch (fileid_type) {
	switch (fileid_type) {
	case FILEID_INO32_GEN_PARENT:
	case FILEID_INO32_GEN_PARENT:
		spin_lock(&dentry->d_lock);
		spin_lock(&dentry->d_lock);
		fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
		fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
		fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
		/*FALLTHRU*/
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN:
		fid->i32.ino = inode->i_ino;
		fid->i32.ino = XFS_I(inode)->i_ino;
		fid->i32.gen = inode->i_generation;
		fid->i32.gen = inode->i_generation;
		break;
		break;
	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
	case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
		spin_lock(&dentry->d_lock);
		spin_lock(&dentry->d_lock);
		fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
		fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
		fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dentry->d_lock);
		/*FALLTHRU*/
		/*FALLTHRU*/
	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
	case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
		fid64->ino = inode->i_ino;
		fid64->ino = XFS_I(inode)->i_ino;
		fid64->gen = inode->i_generation;
		fid64->gen = inode->i_generation;
		break;
		break;
	}
	}
+173 −175
Original line number Original line Diff line number Diff line
@@ -150,6 +150,117 @@ xlog_grant_add_space(
	} while (head_val != old);
	} while (head_val != old);
}
}


/*
 * Walk the reserve queue in order, waking each waiting ticket whose
 * reservation can be satisfied out of *free_bytes and deducting the
 * amount as we go.  Stop at the first ticket that does not fit and
 * return false; return true only if every queued waiter was woken.
 */
STATIC bool
xlog_reserveq_wake(
	struct log		*log,
	int			*free_bytes)
{
	struct xlog_ticket	*ticket;
	int			required;

	list_for_each_entry(ticket, &log->l_reserveq, t_queue) {
		/* permanent reservations need unit_res per remaining count */
		required = (ticket->t_flags & XLOG_TIC_PERM_RESERV) ?
				ticket->t_unit_res * ticket->t_cnt :
				ticket->t_unit_res;
		if (*free_bytes < required)
			return false;

		*free_bytes -= required;
		trace_xfs_log_grant_wake_up(log, ticket);
		wake_up(&ticket->t_wait);
	}

	return true;
}

/*
 * Walk the write queue in order, waking each waiting ticket whose
 * regrant (a single unit_res) fits in *free_bytes, deducting as we go.
 * Stop at the first ticket that does not fit and return false; return
 * true only if every queued waiter was woken.
 */
STATIC bool
xlog_writeq_wake(
	struct log		*log,
	int			*free_bytes)
{
	struct xlog_ticket	*ticket;
	int			required;

	list_for_each_entry(ticket, &log->l_writeq, t_queue) {
		/* only permanent reservations ever sit on the write queue */
		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);

		required = ticket->t_unit_res;
		if (*free_bytes < required)
			return false;

		*free_bytes -= required;
		trace_xfs_log_regrant_write_wake_up(log, ticket);
		wake_up(&ticket->t_wait);
	}

	return true;
}

/*
 * Queue the ticket on the reserve queue and sleep until need_bytes of
 * reserve grant space becomes available, or the log is forcibly shut
 * down.  Returns 0 on success with the ticket removed from the queue,
 * or EIO on shutdown.
 *
 * NOTE(review): caller appears to hold l_grant_reserve_lock on entry;
 * xlog_wait() presumably drops it while sleeping, since the loop
 * explicitly retakes it before rechecking — confirm against xlog_wait.
 */
STATIC int
xlog_reserveq_wait(
	struct log		*log,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	list_add_tail(&tic->t_queue, &log->l_reserveq);

	do {
		/* bail before sleeping if the log has already gone down */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		/* push the AIL to free up log space before we sleep */
		xlog_grant_push_ail(log, need_bytes);

		XFS_STATS_INC(xs_sleep_logspace);
		trace_xfs_log_grant_sleep(log, tic);

		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
		trace_xfs_log_grant_wake(log, tic);

		/* retake the lock, then recheck for shutdown after waking */
		spin_lock(&log->l_grant_reserve_lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	/* dequeue on both paths so the ticket never dangles on the list */
	list_del_init(&tic->t_queue);
	return XFS_ERROR(EIO);
}

/*
 * Queue the ticket on the write queue and sleep until need_bytes of
 * write grant space becomes available, or the log is forcibly shut
 * down.  Returns 0 on success with the ticket removed from the queue,
 * or EIO on shutdown.
 *
 * NOTE(review): caller appears to hold l_grant_write_lock on entry;
 * xlog_wait() presumably drops it while sleeping, since the loop
 * explicitly retakes it before rechecking — confirm against xlog_wait.
 */
STATIC int
xlog_writeq_wait(
	struct log		*log,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	list_add_tail(&tic->t_queue, &log->l_writeq);

	do {
		/* bail before sleeping if the log has already gone down */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		/* push the AIL to free up log space before we sleep */
		xlog_grant_push_ail(log, need_bytes);

		XFS_STATS_INC(xs_sleep_logspace);
		trace_xfs_log_regrant_write_sleep(log, tic);

		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
		trace_xfs_log_regrant_write_wake(log, tic);

		/* retake the lock, then recheck for shutdown after waking */
		spin_lock(&log->l_grant_write_lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	/* dequeue on both paths so the ticket never dangles on the list */
	list_del_init(&tic->t_queue);
	return XFS_ERROR(EIO);
}

static void
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
xlog_tic_reset_res(xlog_ticket_t *tic)
{
{
@@ -350,8 +461,19 @@ xfs_log_reserve(
		retval = xlog_grant_log_space(log, internal_ticket);
		retval = xlog_grant_log_space(log, internal_ticket);
	}
	}


	if (unlikely(retval)) {
		/*
		 * If we are failing, make sure the ticket doesn't have any
		 * current reservations.  We don't want to add this back
		 * when the ticket/ transaction gets cancelled.
		 */
		internal_ticket->t_curr_res = 0;
		/* ungrant will give back unit_res * t_cnt. */
		internal_ticket->t_cnt = 0;
	}

	return retval;
	return retval;
}	/* xfs_log_reserve */
}




/*
/*
@@ -2481,8 +2603,8 @@ xlog_state_get_iclog_space(xlog_t *log,
/*
/*
 * Atomically get the log space required for a log ticket.
 * Atomically get the log space required for a log ticket.
 *
 *
 * Once a ticket gets put onto the reserveq, it will only return after
 * Once a ticket gets put onto the reserveq, it will only return after the
 * the needed reservation is satisfied.
 * needed reservation is satisfied.
 *
 *
 * This function is structured so that it has a lock free fast path. This is
 * This function is structured so that it has a lock free fast path. This is
 * necessary because every new transaction reservation will come through this
 * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ xlog_state_get_iclog_space(xlog_t *log,
 * every pass.
 * every pass.
 *
 *
 * As tickets are only ever moved on and off the reserveq under the
 * As tickets are only ever moved on and off the reserveq under the
 * l_grant_reserve_lock, we only need to take that lock if we are going
 * l_grant_reserve_lock, we only need to take that lock if we are going to add
 * to add the ticket to the queue and sleep. We can avoid taking the lock if the
 * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
 * ticket was never added to the reserveq because the t_queue list head will be
 * was never added to the reserveq because the t_queue list head will be empty
 * empty and we hold the only reference to it so it can safely be checked
 * and we hold the only reference to it so it can safely be checked unlocked.
 * unlocked.
 */
 */
STATIC int
STATIC int
xlog_grant_log_space(xlog_t	   *log,
xlog_grant_log_space(
		     xlog_ticket_t *tic)
	struct log		*log,
	struct xlog_ticket	*tic)
{
{
	int		 free_bytes;
	int			free_bytes, need_bytes;
	int		 need_bytes;
	int			error = 0;


#ifdef DEBUG
	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
		panic("grant Recovery problem");
#endif


	trace_xfs_log_grant_enter(log, tic);
	trace_xfs_log_grant_enter(log, tic);


	/*
	 * If there are other waiters on the queue then give them a chance at
	 * logspace before us.  Wake up the first waiters, if we do not wake
	 * up all the waiters then go to sleep waiting for more free space,
	 * otherwise try to get some space for this transaction.
	 */
	need_bytes = tic->t_unit_res;
	need_bytes = tic->t_unit_res;
	if (tic->t_flags & XFS_LOG_PERM_RESERV)
	if (tic->t_flags & XFS_LOG_PERM_RESERV)
		need_bytes *= tic->t_ocnt;
		need_bytes *= tic->t_ocnt;

	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
	/* something is already sleeping; insert new transaction at end */
	if (!list_empty_careful(&log->l_reserveq)) {
	if (!list_empty_careful(&log->l_reserveq)) {
		spin_lock(&log->l_grant_reserve_lock);
		spin_lock(&log->l_grant_reserve_lock);
		/* recheck the queue now we are locked */
		if (!xlog_reserveq_wake(log, &free_bytes) ||
		if (list_empty(&log->l_reserveq)) {
		    free_bytes < need_bytes)
			error = xlog_reserveq_wait(log, tic, need_bytes);
		spin_unlock(&log->l_grant_reserve_lock);
		spin_unlock(&log->l_grant_reserve_lock);
			goto redo;
	} else if (free_bytes < need_bytes) {
		}
		list_add_tail(&tic->t_queue, &log->l_reserveq);

		trace_xfs_log_grant_sleep1(log, tic);

		/*
		 * Gotta check this before going to sleep, while we're
		 * holding the grant lock.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto error_return;

		XFS_STATS_INC(xs_sleep_logspace);
		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);

		/*
		 * If we got an error, and the filesystem is shutting down,
		 * we'll catch it down below. So just continue...
		 */
		trace_xfs_log_grant_wake1(log, tic);
	}

redo:
	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return_unlocked;

	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
	if (free_bytes < need_bytes) {
		spin_lock(&log->l_grant_reserve_lock);
		if (list_empty(&tic->t_queue))
			list_add_tail(&tic->t_queue, &log->l_reserveq);

		trace_xfs_log_grant_sleep2(log, tic);

		if (XLOG_FORCED_SHUTDOWN(log))
			goto error_return;

		xlog_grant_push_ail(log, need_bytes);

		XFS_STATS_INC(xs_sleep_logspace);
		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);

		trace_xfs_log_grant_wake2(log, tic);
		goto redo;
	}

	if (!list_empty(&tic->t_queue)) {
		spin_lock(&log->l_grant_reserve_lock);
		spin_lock(&log->l_grant_reserve_lock);
		list_del_init(&tic->t_queue);
		error = xlog_reserveq_wait(log, tic, need_bytes);
		spin_unlock(&log->l_grant_reserve_lock);
		spin_unlock(&log->l_grant_reserve_lock);
	}
	}
	if (error)
		return error;


	/* we've got enough space */
	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
	trace_xfs_log_grant_exit(log, tic);
	trace_xfs_log_grant_exit(log, tic);
	xlog_verify_grant_tail(log);
	xlog_verify_grant_tail(log);
	return 0;
	return 0;

}
error_return_unlocked:
	spin_lock(&log->l_grant_reserve_lock);
error_return:
	list_del_init(&tic->t_queue);
	spin_unlock(&log->l_grant_reserve_lock);
	trace_xfs_log_grant_error(log, tic);

	/*
	 * If we are failing, make sure the ticket doesn't have any
	 * current reservations. We don't want to add this back when
	 * the ticket/transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
	return XFS_ERROR(EIO);
}	/* xlog_grant_log_space */



/*
/*
 * Replenish the byte reservation required by moving the grant write head.
 * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ xlog_grant_log_space(xlog_t *log,
 * free fast path.
 * free fast path.
 */
 */
STATIC int
STATIC int
xlog_regrant_write_log_space(xlog_t	   *log,
xlog_regrant_write_log_space(
			     xlog_ticket_t *tic)
	struct log		*log,
	struct xlog_ticket	*tic)
{
{
	int			free_bytes, need_bytes;
	int			free_bytes, need_bytes;
	int			error = 0;


	tic->t_curr_res = tic->t_unit_res;
	tic->t_curr_res = tic->t_unit_res;
	xlog_tic_reset_res(tic);
	xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t *log,
	if (tic->t_cnt > 0)
	if (tic->t_cnt > 0)
		return 0;
		return 0;


#ifdef DEBUG
	ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
		panic("regrant Recovery problem");
#endif


	trace_xfs_log_regrant_write_enter(log, tic);
	trace_xfs_log_regrant_write_enter(log, tic);
	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return_unlocked;


	/* If there are other waiters on the queue then give them a
	/*
	 * chance at logspace before us. Wake up the first waiters,
	 * If there are other waiters on the queue then give them a chance at
	 * if we do not wake up all the waiters then go to sleep waiting
	 * logspace before us.  Wake up the first waiters, if we do not wake
	 * for more free space, otherwise try to get some space for
	 * up all the waiters then go to sleep waiting for more free space,
	 * this transaction.
	 * otherwise try to get some space for this transaction.
	 */
	 */
	need_bytes = tic->t_unit_res;
	need_bytes = tic->t_unit_res;
	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
	if (!list_empty_careful(&log->l_writeq)) {
	if (!list_empty_careful(&log->l_writeq)) {
		struct xlog_ticket *ntic;

		spin_lock(&log->l_grant_write_lock);
		spin_lock(&log->l_grant_write_lock);
		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
		if (!xlog_writeq_wake(log, &free_bytes) ||
		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
		    free_bytes < need_bytes)
			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
			error = xlog_writeq_wait(log, tic, need_bytes);

			if (free_bytes < ntic->t_unit_res)
				break;
			free_bytes -= ntic->t_unit_res;
			wake_up(&ntic->t_wait);
		}

		if (ntic != list_first_entry(&log->l_writeq,
						struct xlog_ticket, t_queue)) {
			if (list_empty(&tic->t_queue))
				list_add_tail(&tic->t_queue, &log->l_writeq);
			trace_xfs_log_regrant_write_sleep1(log, tic);

			xlog_grant_push_ail(log, need_bytes);

			XFS_STATS_INC(xs_sleep_logspace);
			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
			trace_xfs_log_regrant_write_wake1(log, tic);
		} else
		spin_unlock(&log->l_grant_write_lock);
		spin_unlock(&log->l_grant_write_lock);
	}
	} else if (free_bytes < need_bytes) {

redo:
	if (XLOG_FORCED_SHUTDOWN(log))
		goto error_return_unlocked;

	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
	if (free_bytes < need_bytes) {
		spin_lock(&log->l_grant_write_lock);
		spin_lock(&log->l_grant_write_lock);
		if (list_empty(&tic->t_queue))
		error = xlog_writeq_wait(log, tic, need_bytes);
			list_add_tail(&tic->t_queue, &log->l_writeq);

		if (XLOG_FORCED_SHUTDOWN(log))
			goto error_return;

		xlog_grant_push_ail(log, need_bytes);

		XFS_STATS_INC(xs_sleep_logspace);
		trace_xfs_log_regrant_write_sleep2(log, tic);
		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);

		trace_xfs_log_regrant_write_wake2(log, tic);
		goto redo;
	}

	if (!list_empty(&tic->t_queue)) {
		spin_lock(&log->l_grant_write_lock);
		list_del_init(&tic->t_queue);
		spin_unlock(&log->l_grant_write_lock);
		spin_unlock(&log->l_grant_write_lock);
	}
	}


	/* we've got enough space */
	if (error)
		return error;

	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
	trace_xfs_log_regrant_write_exit(log, tic);
	trace_xfs_log_regrant_write_exit(log, tic);
	xlog_verify_grant_tail(log);
	xlog_verify_grant_tail(log);
	return 0;
	return 0;

}

 error_return_unlocked:
	spin_lock(&log->l_grant_write_lock);
 error_return:
	list_del_init(&tic->t_queue);
	spin_unlock(&log->l_grant_write_lock);
	trace_xfs_log_regrant_write_error(log, tic);

	/*
	 * If we are failing, make sure the ticket doesn't have any
	 * current reservations. We don't want to add this back when
	 * the ticket/transaction gets cancelled.
	 */
	tic->t_curr_res = 0;
	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
	return XFS_ERROR(EIO);
}	/* xlog_regrant_write_log_space */



/* The first cnt-1 times through here we don't need to
/* The first cnt-1 times through here we don't need to
 * move the grant write head because the permanent
 * move the grant write head because the permanent
+4 −8
Original line number Original line Diff line number Diff line
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);