Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80529c45 authored by Bill O'Donnell, committed by Dave Chinner
Browse files

xfs: pass xfsstats structures to handlers and macros



This patch is the next step toward per-fs xfs stats. The patch makes
the show and clear routines able to handle any stats structure
associated with a kobject.

Instead of a single global xfsstats structure, add kobject and a pointer
to a per-cpu struct xfsstats. Modify the macros that manipulate the stats
accordingly: XFS_STATS_INC, XFS_STATS_DEC, and XFS_STATS_ADD now access
xfsstats->xs_stats.

The sysfs functions need to get from the kobject back to the xfsstats
structure which contains it, and pass the pointer to the ->xs_stats
percpu structure into the show & clear routines.

Signed-off-by: Bill O'Donnell <billodo@redhat.com>
Reviewed-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent a27c2640
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -171,6 +171,13 @@ struct xfs_kobj {
	struct completion	complete;
};

struct xstats {
	struct xfsstats __percpu	*xs_stats;
	struct xfs_kobj			xs_kobj;
};

extern struct xstats xfsstats;

/* Kernel uid/gid conversion. These are used to convert to/from the on disk
 * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally.
 * The conversion here is type only, the value will remain the same since we
+15 −17
Original line number Diff line number Diff line
@@ -18,18 +18,18 @@
#include "xfs.h"
#include <linux/proc_fs.h>

DEFINE_PER_CPU(struct xfsstats, xfsstats);
struct xstats xfsstats;

static int counter_val(int idx)
static int counter_val(struct xfsstats __percpu *stats, int idx)
{
	int val = 0, cpu;

	for_each_possible_cpu(cpu)
		val += *(((__u32 *)&per_cpu(xfsstats, cpu) + idx));
		val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));
	return val;
}

int xfs_stats_format(char *buf)
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{
	int		i, j;
	int		len = 0;
@@ -73,14 +73,14 @@ int xfs_stats_format(char *buf)
		/* inner loop does each group */
		for (; j < xstats[i].endpoint; j++)
			len += snprintf(buf + len, PATH_MAX - len, " %u",
					counter_val(j));
					counter_val(stats, j));
		len += snprintf(buf + len, PATH_MAX - len, "\n");
	}
	/* extra precision counters */
	for_each_possible_cpu(i) {
		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
		xs_xstrat_bytes += per_cpu_ptr(stats, i)->xs_xstrat_bytes;
		xs_write_bytes += per_cpu_ptr(stats, i)->xs_write_bytes;
		xs_read_bytes += per_cpu_ptr(stats, i)->xs_read_bytes;
	}

	len += snprintf(buf + len, PATH_MAX-len, "xpc %Lu %Lu %Lu\n",
@@ -95,7 +95,7 @@ int xfs_stats_format(char *buf)
	return len;
}

void xfs_stats_clearall(void)
void xfs_stats_clearall(struct xfsstats __percpu *stats)
{
	int		c;
	__uint32_t	vn_active;
@@ -104,9 +104,9 @@ void xfs_stats_clearall(void)
	for_each_possible_cpu(c) {
		preempt_disable();
		/* save vn_active, it's a universal truth! */
		vn_active = per_cpu(xfsstats, c).vn_active;
		memset(&per_cpu(xfsstats, c), 0, sizeof(struct xfsstats));
		per_cpu(xfsstats, c).vn_active = vn_active;
		vn_active = per_cpu_ptr(stats, c)->vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));
		per_cpu_ptr(stats, c)->vn_active = vn_active;
		preempt_enable();
	}
}
@@ -117,10 +117,8 @@ static int xqm_proc_show(struct seq_file *m, void *v)
{
	/* maximum; incore; ratio free to inuse; freelist */
	seq_printf(m, "%d\t%d\t%d\t%u\n",
			0,
			counter_val(XFSSTAT_END_XQMSTAT),
			0,
			counter_val(XFSSTAT_END_XQMSTAT + 1));
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT),
		   0, counter_val(xfsstats.xs_stats, XFSSTAT_END_XQMSTAT + 1));
	return 0;
}

@@ -144,7 +142,7 @@ static int xqmstat_proc_show(struct seq_file *m, void *v)

	seq_printf(m, "qm");
	for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
		seq_printf(m, " %u", counter_val(j));
		seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
	seq_putc(m, '\n');
	return 0;
}
+11 −11
Original line number Diff line number Diff line
@@ -18,9 +18,6 @@
#ifndef __XFS_STATS_H__
#define __XFS_STATS_H__

int xfs_stats_format(char *buf);
void xfs_stats_clearall(void);

#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)

#include <linux/percpu.h>
@@ -217,15 +214,18 @@ struct xfsstats {
	__uint64_t		xs_read_bytes;
};

DECLARE_PER_CPU(struct xfsstats, xfsstats);
int xfs_stats_format(struct xfsstats __percpu *stats, char *buf);
void xfs_stats_clearall(struct xfsstats __percpu *stats);
extern struct xstats xfsstats;

/*
 * We don't disable preempt, not too worried about poking the
 * wrong CPU's stat for now (also aggregated before reporting).
 */
#define XFS_STATS_INC(v)	(per_cpu(xfsstats, current_cpu()).v++)
#define XFS_STATS_DEC(v)	(per_cpu(xfsstats, current_cpu()).v--)
#define XFS_STATS_ADD(v, inc)	(per_cpu(xfsstats, current_cpu()).v += (inc))
#define XFS_STATS_INC(v)	\
	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++)

#define XFS_STATS_DEC(v)	\
	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--)

#define XFS_STATS_ADD(v, inc)	\
	(per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc))

extern int xfs_init_procfs(void);
extern void xfs_cleanup_procfs(void);
+15 −6
Original line number Diff line number Diff line
@@ -61,7 +61,6 @@ static kmem_zone_t *xfs_ioend_zone;
mempool_t *xfs_ioend_pool;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
static struct xfs_kobj xfs_stats_kobj;	/* global stats sysfs attrs */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif
@@ -1842,11 +1841,18 @@ init_xfs_fs(void)
		goto out_sysctl_unregister;
	}

	xfs_stats_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_stats_kobj, &xfs_stats_ktype, NULL,
	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_kset_unregister;
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
@@ -1871,7 +1877,9 @@ init_xfs_fs(void)
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfs_stats_kobj);
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
@@ -1898,7 +1906,8 @@ exit_xfs_fs(void)
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfs_stats_kobj);
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
+1 −1
Original line number Diff line number Diff line
@@ -37,7 +37,7 @@ xfs_stats_clear_proc_handler(
	ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);

	if (!ret && write && *valp) {
		xfs_stats_clearall();
		xfs_stats_clearall(xfsstats.xs_stats);
		xfs_stats_clear = 0;
	}

Loading