Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 14c9fda5 authored by Dan Magenheimer's avatar Dan Magenheimer Committed by Greg Kroah-Hartman
Browse files

staging: ramster: place ramster codebase on top of new zcache2 codebase

[V2: rebased to apply to 20120905 staging-next, no other changes]

This slightly modified ramster codebase is now built entirely on zcache2
and all ramster-specific code is fully contained in a subdirectory.

Ramster extends zcache2 to allow pages compressed via zcache2 to be
"load-balanced" across machines in a cluster.  Control and data communication
is done via kernel sockets, and cluster configuration and management is
heavily leveraged from the ocfs2 cluster filesystem.

There are no new features since the codebase introduced into staging at 3.4.
Some cleanup was performed though:
 1) Interfaces directly with new zbud
 2) Debugfs now used instead of sysfs where possible.  Sysfs still
    used where necessary for userland cluster configuration.

Ramster is very much a work-in-progress but also does really work!

RAMSTER HIGH LEVEL OVERVIEW (from original V5 posting in Feb 2012)

RAMster implements peer-to-peer transcendent memory, allowing a "cluster" of
kernels to dynamically pool their RAM so that a RAM-hungry workload on one
machine can temporarily and transparently utilize RAM on another machine which
is presumably idle or running a non-RAM-hungry workload.  Other than the
already-merged cleancache patchset and frontswap patchset, no core kernel
changes are currently required.

(Note that, unlike previous public descriptions of RAMster, this implementation
does NOT require synchronous "gets" or core networking changes. As of V5,
it also co-exists with ocfs2.)

RAMster combines a clustering and messaging foundation based on the ocfs2
cluster layer with the in-kernel compression implementation of zcache2, and
adds code to glue them together.  When a page is "put" to RAMster, it is
compressed and stored locally.  Periodically, a thread will "remotify" these
pages by sending them via messages to a remote machine.  When the page is
later needed as indicated by a page fault, a "get" is issued.  If the data
is local, it is uncompressed and the fault is resolved.  If the data is
remote, a message is sent to fetch the data and the faulting thread sleeps;
when the data arrives, the thread awakens, the data is decompressed and
the fault is resolved.

As of V5, clusters up to eight nodes are supported; each node can remotify
pages to one specified node, so clusters can be configured as clients to
a "memory server".  Some simple policy is in place that will need to be
refined over time.  Larger clusters and fault-resistant protocols can also
be added over time.

A HOW-TO is available at:
http://oss.oracle.com/projects/tmem/dist/files/RAMster/HOWTO-120817



Acked-by: default avatarKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: default avatarDan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent faca2ef7
Loading
Loading
Loading
Loading
+14 −0
Original line number Diff line number Diff line
@@ -14,3 +14,17 @@ config ZCACHE2
	  technical disagreements.  It is intended that they will merge
	  again in the future.  Until then, zcache2 is a single-node
	  version of ramster.

config RAMSTER
	bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
	depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE2=y
	# must ensure struct page is 8-byte aligned
	select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
	default n
	help
	  RAMster allows RAM on other machines in a cluster to be utilized
	  dynamically and symmetrically instead of swapping to a local swap
	  disk, thus improving performance on memory-constrained workloads
	  while minimizing total RAM across the cluster.  RAMster, like
	  zcache2, compresses swap pages into local RAM, but then remotifies
	  the compressed pages to another node in the RAMster cluster.
+3 −0
Original line number Diff line number Diff line
# Core zcache2 objects, always built when CONFIG_ZCACHE2 is enabled.
zcache-y	:=		zcache-main.o tmem.o zbud.o
# RAMster cluster support: tmem glue, messaging, and the ocfs2-derived
# cluster stack (node manager, TCP transport, heartbeat, log masks).
zcache-$(CONFIG_RAMSTER)	+=	ramster/ramster.o ramster/r2net.o
zcache-$(CONFIG_RAMSTER)	+=	ramster/nodemanager.o ramster/tcp.o
zcache-$(CONFIG_RAMSTER)	+=	ramster/heartbeat.o ramster/masklog.o

obj-$(CONFIG_ZCACHE2)	+=	zcache.o
+462 −0
Original line number Diff line number Diff line
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005, 2012 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>

#include "heartbeat.h"
#include "tcp.h"
#include "nodemanager.h"

#include "masklog.h"

/*
 * The first heartbeat pass had one global thread that would serialize all hb
 * callback calls.  This global serializing sem should only be removed once
 * we've made sure that all callees can deal with being called concurrently
 * from multiple hb region threads.
 */
static DECLARE_RWSEM(r2hb_callback_sem);

/*
 * multiple hb threads are watching multiple regions.  A node is live
 * whenever any of the threads sees activity from the node in its region.
 */
static DEFINE_SPINLOCK(r2hb_live_lock);
/* one bit per possible node number; a set bit means the node is live */
static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];

/* one priority-sorted callback list per callback type (node up / node down) */
static struct r2hb_callback {
	struct list_head list;
} r2hb_callbacks[R2HB_NUM_CB];

enum r2hb_heartbeat_modes {
	R2HB_HEARTBEAT_LOCAL		= 0,
	R2HB_HEARTBEAT_GLOBAL,
	R2HB_HEARTBEAT_NUM_MODES,
};

/* userland-visible mode names, indexed by enum r2hb_heartbeat_modes */
char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
		"local",	/* R2HB_HEARTBEAT_LOCAL */
		"global",	/* R2HB_HEARTBEAT_GLOBAL */
};

/* current tunables; writes below take r2hb_live_lock */
unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;

/*
 * Update the dead threshold, silently ignoring values at or below the
 * minimum.  Readers of r2hb_dead_threshold need no locking: the value
 * cannot change once regions are active and is uninteresting before then.
 */
static void r2hb_dead_threshold_set(unsigned int threshold)
{
	if (threshold <= R2HB_MIN_DEAD_THRESHOLD)
		return;

	spin_lock(&r2hb_live_lock);
	r2hb_dead_threshold = threshold;
	spin_unlock(&r2hb_live_lock);
}

/*
 * Switch the heartbeat mode.  Returns 0 on success, -1 if hb_mode is
 * out of range.  (The "hearbeat" spelling is historical; callers use it.)
 */
static int r2hb_global_hearbeat_mode_set(unsigned int hb_mode)
{
	if (hb_mode >= R2HB_HEARTBEAT_NUM_MODES)
		return -1;

	spin_lock(&r2hb_live_lock);
	r2hb_heartbeat_mode = hb_mode;
	spin_unlock(&r2hb_live_lock);

	return 0;
}

/* Nothing to tear down yet; kept for symmetry with r2hb_init(). */
void r2hb_exit(void)
{
}

/* Initialise the callback lists and clear the live-node map.  Returns 0. */
int r2hb_init(void)
{
	unsigned int idx;

	memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));

	for (idx = 0; idx < ARRAY_SIZE(r2hb_callbacks); idx++)
		INIT_LIST_HEAD(&r2hb_callbacks[idx].list);

	return 0;
}

/* if we're already in a callback then we're already serialized by the sem */
static void r2hb_fill_node_map_from_callback(unsigned long *map,
					     unsigned bytes)
{
	BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));

	memcpy(map, &r2hb_live_node_bitmap, bytes);
}

/*
 * get a map of all nodes that are heartbeating in any regions
 *
 * Lock order: r2hb_callback_sem (read) is taken before r2hb_live_lock.
 */
void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
{
	/* callers want to serialize this map and callbacks so that they
	 * can trust that they don't miss nodes coming to the party */
	down_read(&r2hb_callback_sem);
	spin_lock(&r2hb_live_lock);
	r2hb_fill_node_map_from_callback(map, bytes);
	spin_unlock(&r2hb_live_lock);
	up_read(&r2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(r2hb_fill_node_map);

/*
 * heartbeat configfs bits.  The heartbeat set is a default set under
 * the cluster set in nodemanager.c.
 */

/* heartbeat set */

/* configfs group wrapper for the "heartbeat" directory */
struct r2hb_hb_group {
	struct config_group hs_group;
	/* some stuff? */
};

/* Map a config_group back to its enclosing r2hb_hb_group; NULL-safe. */
static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
{
	if (!group)
		return NULL;

	return container_of(group, struct r2hb_hb_group, hs_group);
}

/* single static placeholder item handed back for every mkdir in the group */
static struct config_item r2hb_config_item;

/*
 * configfs make_item: validate the region name and hand back the shared
 * static item (RAMster keeps no real per-region state here).
 *
 * NOTE(review): config_item_put() before returning the item looks like a
 * reference *drop* rather than an acquisition; confirm this matches what
 * configfs expects for items returned from make_item.
 */
static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
							  const char *name)
{
	int ret;

	if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto free;
	}

	config_item_put(&r2hb_config_item);

	return &r2hb_config_item;
free:
	return ERR_PTR(ret);
}

/*
 * configfs drop_item: log when global heartbeat is active, then release
 * the reference taken by configfs on the item.
 */
static void r2hb_hb_group_drop_item(struct config_group *group,
					   struct config_item *item)
{
	if (r2hb_global_heartbeat_active())
		pr_notice("ramster: Heartbeat %s on region %s (%s)\n",
			"stopped/aborted", config_item_name(item),
			"no region");

	config_item_put(item);
}

/* configfs attribute plus typed show/store hooks for the heartbeat group */
struct r2hb_hb_group_attribute {
	struct configfs_attribute attr;
	ssize_t (*show)(struct r2hb_hb_group *, char *);
	ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
};

/* Generic show dispatcher: forward to the attribute's typed show hook. */
static ssize_t r2hb_hb_group_show(struct config_item *item,
					 struct configfs_attribute *attr,
					 char *page)
{
	struct r2hb_hb_group_attribute *ga =
		container_of(attr, struct r2hb_hb_group_attribute, attr);
	struct r2hb_hb_group *grp = to_r2hb_hb_group(to_config_group(item));

	if (!ga->show)
		return 0;

	return ga->show(grp, page);
}

/* Generic store dispatcher: forward to the attribute's typed store hook. */
static ssize_t r2hb_hb_group_store(struct config_item *item,
					  struct configfs_attribute *attr,
					  const char *page, size_t count)
{
	struct r2hb_hb_group_attribute *ga =
		container_of(attr, struct r2hb_hb_group_attribute, attr);
	struct r2hb_hb_group *grp = to_r2hb_hb_group(to_config_group(item));

	if (!ga->store)
		return -EINVAL;

	return ga->store(grp, page, count);
}

/* Report the current dead threshold as a decimal line. */
static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
						     char *page)
{
	unsigned int threshold = r2hb_dead_threshold;

	return sprintf(page, "%u\n", threshold);
}

/*
 * Parse a decimal threshold from userland and apply it.  Returns the
 * consumed byte count on success or a negative errno from kstrtoul().
 */
static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
						    const char *page,
						    size_t count)
{
	unsigned long tmp;
	int err;

	/* kstrtoul() takes a const string; no need to cast away const */
	err = kstrtoul(page, 10, &tmp);
	if (err)
		return err;

	/* this will validate ranges for us. */
	r2hb_dead_threshold_set((unsigned int) tmp);

	return count;
}

/* Report the current heartbeat mode by name ("local" or "global"). */
static
ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
				       char *page)
{
	const char *desc = r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode];

	return sprintf(page, "%s\n", desc);
}

/*
 * Set the heartbeat mode from its userland name (case-insensitive,
 * optional trailing newline).  Returns count on a recognised mode,
 * -EINVAL otherwise.
 */
static
ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
					const char *page, size_t count)
{
	unsigned int i;
	size_t len;

	/* guard page[count - 1] below against a zero-length write */
	if (!count)
		return -EINVAL;

	len = (page[count - 1] == '\n') ? count - 1 : count;
	if (!len)
		return -EINVAL;

	for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
		if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
			continue;

		if (!r2hb_global_hearbeat_mode_set(i))
			pr_notice("ramster: Heartbeat mode set to %s\n",
			       r2hb_heartbeat_mode_desc[i]);
		return count;
	}

	return -EINVAL;
}

/* "dead_threshold" attribute: tunable sample count before a node is dead */
static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
	.attr	= { .ca_owner = THIS_MODULE,
		    .ca_name = "dead_threshold",
		    .ca_mode = S_IRUGO | S_IWUSR },
	.show	= r2hb_hb_group_threshold_show,
	.store	= r2hb_hb_group_threshold_store,
};

/* "mode" attribute: selects local vs global heartbeat */
static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
	.attr   = { .ca_owner = THIS_MODULE,
		.ca_name = "mode",
		.ca_mode = S_IRUGO | S_IWUSR },
	.show   = r2hb_hb_group_mode_show,
	.store  = r2hb_hb_group_mode_store,
};

/* NULL-terminated attribute table exported via ct_attrs below */
static struct configfs_attribute *r2hb_hb_group_attrs[] = {
	&r2hb_hb_group_attr_threshold.attr,
	&r2hb_hb_group_attr_mode.attr,
	NULL,
};

/* attribute dispatchers (the "hearbeat" spelling is a historical typo) */
static struct configfs_item_operations r2hb_hearbeat_group_item_ops = {
	.show_attribute		= r2hb_hb_group_show,
	.store_attribute	= r2hb_hb_group_store,
};

/* mkdir/rmdir handlers for the heartbeat group */
static struct configfs_group_operations r2hb_hb_group_group_ops = {
	.make_item	= r2hb_hb_group_make_item,
	.drop_item	= r2hb_hb_group_drop_item,
};

static struct config_item_type r2hb_hb_group_type = {
	.ct_group_ops	= &r2hb_hb_group_group_ops,
	.ct_item_ops	= &r2hb_hearbeat_group_item_ops,
	.ct_attrs	= r2hb_hb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

/* this is just here to avoid touching group in heartbeat.h which the
 * entire damn world #includes */
struct config_group *r2hb_alloc_hb_set(void)
{
	struct r2hb_hb_group *hs;

	hs = kzalloc(sizeof(*hs), GFP_KERNEL);
	if (!hs)
		return NULL;

	config_group_init_type_name(&hs->hs_group, "heartbeat",
				    &r2hb_hb_group_type);

	return &hs->hs_group;
}

/* Free a set from r2hb_alloc_hb_set(); NULL-safe (kfree(NULL) is a no-op). */
void r2hb_free_hb_set(struct config_group *group)
{
	kfree(to_r2hb_hb_group(group));
}

/* hb callback registration and issuing */

/*
 * Look up the callback list for a type.  Rejects anything at or beyond
 * the table bound, not just the R2HB_NUM_CB sentinel, so a corrupted
 * hc_type cannot index past r2hb_callbacks[].
 */
static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
{
	if (type >= R2HB_NUM_CB)
		return ERR_PTR(-EINVAL);

	return &r2hb_callbacks[type];
}

/*
 * Initialise a callback descriptor.  Must be called before
 * r2hb_register_callback(); sets hc_magic, which registration checks.
 */
void r2hb_setup_callback(struct r2hb_callback_func *hc,
			 enum r2hb_callback_type type,
			 r2hb_cb_func *func,
			 void *data,
			 int priority)
{
	hc->hc_magic = R2HB_CB_MAGIC;
	hc->hc_type = type;
	hc->hc_func = func;
	hc->hc_data = data;
	hc->hc_priority = priority;
	INIT_LIST_HEAD(&hc->hc_item);
}
EXPORT_SYMBOL_GPL(r2hb_setup_callback);

int r2hb_register_callback(const char *region_uuid,
			   struct r2hb_callback_func *hc)
{
	struct r2hb_callback_func *tmp;
	struct list_head *iter;
	struct r2hb_callback *hbcall;
	int ret;

	BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
	BUG_ON(!list_empty(&hc->hc_item));

	hbcall = hbcall_from_type(hc->hc_type);
	if (IS_ERR(hbcall)) {
		ret = PTR_ERR(hbcall);
		goto out;
	}

	down_write(&r2hb_callback_sem);

	list_for_each(iter, &hbcall->list) {
		tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
		if (hc->hc_priority < tmp->hc_priority) {
			list_add_tail(&hc->hc_item, iter);
			break;
		}
	}
	if (list_empty(&hc->hc_item))
		list_add_tail(&hc->hc_item, &hbcall->list);

	up_write(&r2hb_callback_sem);
	ret = 0;
out:
	mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
	     ret, __builtin_return_address(0), hc);
	return ret;
}
EXPORT_SYMBOL_GPL(r2hb_register_callback);

/*
 * Remove a previously registered callback.  region_uuid is unused.
 *
 * NOTE(review): the list_empty() short-circuit runs before taking
 * r2hb_callback_sem; verify callers cannot race registration against
 * unregistration of the same descriptor.
 */
void r2hb_unregister_callback(const char *region_uuid,
			      struct r2hb_callback_func *hc)
{
	BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);

	mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
	     __builtin_return_address(0), hc);

	/* XXX Can this happen _with_ a region reference? */
	if (list_empty(&hc->hc_item))
		return;

	down_write(&r2hb_callback_sem);

	list_del_init(&hc->hc_item);

	up_write(&r2hb_callback_sem);
}
EXPORT_SYMBOL_GPL(r2hb_unregister_callback);

/*
 * Return 1 if node_num is marked live in the heartbeat map, 0 otherwise.
 * Safe to call from a heartbeat callback (no extra locking taken).
 */
int r2hb_check_node_heartbeating_from_callback(u8 node_num)
{
	unsigned long map[BITS_TO_LONGS(R2NM_MAX_NODES)];

	r2hb_fill_node_map_from_callback(map, sizeof(map));

	if (test_bit(node_num, map))
		return 1;

	mlog(ML_HEARTBEAT,
	     "node (%u) does not have heartbeating enabled.\n",
	     node_num);
	return 0;
}
EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);

/* Stub: RAMster has no real heartbeat regions to stop (yet). */
void r2hb_stop_all_regions(void)
{
}
EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);

/*
 * this is just a hack until we get the plumbing which flips file systems
 * read only and drops the hb ref instead of killing the node dead.
 */
int r2hb_global_heartbeat_active(void)
{
	return r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL;
}
EXPORT_SYMBOL(r2hb_global_heartbeat_active);

/* added for RAMster */
/*
 * Mark a node live by hand.  node_num is a signed int, so also reject
 * negative values: they would pass the upper-bound check and make
 * set_bit() index before the start of the bitmap.
 */
void r2hb_manual_set_node_heartbeating(int node_num)
{
	if (node_num >= 0 && node_num < R2NM_MAX_NODES)
		set_bit(node_num, r2hb_live_node_bitmap);
}
EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
+87 −0
Original line number Diff line number Diff line
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * heartbeat.h
 *
 * Function prototypes
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#ifndef R2CLUSTER_HEARTBEAT_H
#define R2CLUSTER_HEARTBEAT_H

/* heartbeat sampling period, in milliseconds */
#define R2HB_REGION_TIMEOUT_MS		2000

/* longest region name accepted by the configfs make_item handler */
#define R2HB_MAX_REGION_NAME_LEN	32

/* number of changes to be seen as live */
#define R2HB_LIVE_THRESHOLD	   2
/* number of equal samples to be seen as dead */
extern unsigned int r2hb_dead_threshold;
#define R2HB_DEFAULT_DEAD_THRESHOLD	   31
/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
#define R2HB_MIN_DEAD_THRESHOLD	  2
#define R2HB_MAX_WRITE_TIMEOUT_MS \
	(R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))

/* sanity value stored in r2hb_callback_func.hc_magic by the setup helper */
#define R2HB_CB_MAGIC		0x51d1e4ec

/* callback stuff */
enum r2hb_callback_type {
	R2HB_NODE_DOWN_CB = 0,
	R2HB_NODE_UP_CB,
	R2HB_NUM_CB
};

struct r2nm_node;
/* invoked with the node, its number, and the caller-supplied data */
typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);

struct r2hb_callback_func {
	u32			hc_magic;	/* must be R2HB_CB_MAGIC */
	struct list_head	hc_item;	/* link in the per-type list */
	r2hb_cb_func		*hc_func;
	void			*hc_data;
	int			hc_priority;	/* list kept sorted ascending */
	enum r2hb_callback_type hc_type;
};

struct config_group *r2hb_alloc_hb_set(void);
void r2hb_free_hb_set(struct config_group *group);

void r2hb_setup_callback(struct r2hb_callback_func *hc,
			 enum r2hb_callback_type type,
			 r2hb_cb_func *func,
			 void *data,
			 int priority);
int r2hb_register_callback(const char *region_uuid,
			   struct r2hb_callback_func *hc);
void r2hb_unregister_callback(const char *region_uuid,
			      struct r2hb_callback_func *hc);
void r2hb_fill_node_map(unsigned long *map,
			unsigned bytes);
void r2hb_exit(void);
int r2hb_init(void);
int r2hb_check_node_heartbeating_from_callback(u8 node_num);
void r2hb_stop_all_regions(void);
/* NOTE(review): no definition visible in heartbeat.c for this prototype */
int r2hb_get_all_regions(char *region_uuids, u8 numregions);
int r2hb_global_heartbeat_active(void);
void r2hb_manual_set_node_heartbeating(int);

#endif /* R2CLUSTER_HEARTBEAT_H */
+155 −0
Original line number Diff line number Diff line
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004, 2005, 2012 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "masklog.h"

/* masks whose messages are allowed through */
struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
/* masks explicitly denied; consulted only if the allow bit is clear */
struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
EXPORT_SYMBOL_GPL(r2_mlog_not_bits);

/* Render a mask's state as "allow", "deny", or "off" into buf. */
static ssize_t mlog_mask_show(u64 mask, char *buf)
{
	const char *state = "off";

	if (__mlog_test_u64(mask, r2_mlog_and_bits))
		state = "allow";
	else if (__mlog_test_u64(mask, r2_mlog_not_bits))
		state = "deny";

	return snprintf(buf, PAGE_SIZE, "%s\n", state);
}

/*
 * Set a mask's state from the strings "allow", "deny", or "off"
 * (case-insensitive prefix match).  Returns count, or -EINVAL on
 * unrecognised input.
 */
static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
{
	if (!strnicmp(buf, "allow", 5)) {
		__mlog_clear_u64(mask, r2_mlog_not_bits);
		__mlog_set_u64(mask, r2_mlog_and_bits);
	} else if (!strnicmp(buf, "deny", 4)) {
		__mlog_clear_u64(mask, r2_mlog_and_bits);
		__mlog_set_u64(mask, r2_mlog_not_bits);
	} else if (!strnicmp(buf, "off", 3)) {
		__mlog_clear_u64(mask, r2_mlog_and_bits);
		__mlog_clear_u64(mask, r2_mlog_not_bits);
	} else {
		return -EINVAL;
	}

	return count;
}

/* sysfs attribute paired with the mask bit it controls */
struct mlog_attribute {
	struct attribute attr;
	u64 mask;
};

#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)

/* builds one table entry: sysfs name "#_name", mask constant ML_##_name */
#define define_mask(_name) {			\
	.attr = {				\
		.name = #_name,			\
		.mode = S_IRUGO | S_IWUSR,	\
	},					\
	.mask = ML_##_name,			\
}

/* one entry per exposed log mask; unused tail entries stay zeroed,
 * which r2_mlog_sys_init() relies on (attr.mode == 0 terminates) */
static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
	define_mask(TCP),
	define_mask(MSG),
	define_mask(SOCKET),
	define_mask(HEARTBEAT),
	define_mask(HB_BIO),
	define_mask(DLMFS),
	define_mask(DLM),
	define_mask(DLM_DOMAIN),
	define_mask(DLM_THREAD),
	define_mask(DLM_MASTER),
	define_mask(DLM_RECOVERY),
	define_mask(DLM_GLUE),
	define_mask(VOTE),
	define_mask(CONN),
	define_mask(QUORUM),
	define_mask(BASTS),
	define_mask(CLUSTER),
	define_mask(ERROR),
	define_mask(NOTICE),
	define_mask(KTHREAD),
};

/* NULL-terminated pointer table handed to the ktype as default_attrs */
static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };

static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
			 char *buf)
{
	struct mlog_attribute *mlog_attr = to_mlog_attr(attr);

	return mlog_mask_show(mlog_attr->mask, buf);
}

/* sysfs store: recover the mask from the attribute and update its state. */
static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
			  const char *buf, size_t count)
{
	return mlog_mask_store(to_mlog_attr(attr)->mask, buf, count);
}

static const struct sysfs_ops mlog_attr_ops = {
	.show  = mlog_show,
	.store = mlog_store,
};

/* ktype exposing the mask attributes filled in by r2_mlog_sys_init() */
static struct kobj_type mlog_ktype = {
	.default_attrs = mlog_attr_ptrs,
	.sysfs_ops     = &mlog_attr_ops,
};

/* registered under the caller's kset as "logmask" */
static struct kset mlog_kset = {
	.kobj   = {.ktype = &mlog_ktype},
};

int r2_mlog_sys_init(struct kset *r2cb_kset)
{
	int i = 0;

	while (mlog_attrs[i].attr.mode) {
		mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
		i++;
	}
	mlog_attr_ptrs[i] = NULL;

	kobject_set_name(&mlog_kset.kobj, "logmask");
	mlog_kset.kobj.kset = r2cb_kset;
	return kset_register(&mlog_kset);
}

/* Unregister the "logmask" kset registered by r2_mlog_sys_init(). */
void r2_mlog_sys_shutdown(void)
{
	kset_unregister(&mlog_kset);
}
Loading