Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4ce867f authored by Goldwyn Rodrigues's avatar Goldwyn Rodrigues
Browse files

Introduce md_cluster_info



md_cluster_info stores the cluster information in the MD device.

The join() is called when mddev detects it is a clustered device.
The main responsibilities are:
	1. Setup a DLM lockspace
	2. Setup all initial locks such as super block locks and bitmap lock (will come later)

The leave() releases the lockspace and all the locks held.

Signed-off-by: default avatarGoldwyn Rodrigues <rgoldwyn@suse.com>
parent edb39c9d
Loading
Loading
Loading
Loading
+8 −1
Original line number Original line Diff line number Diff line
@@ -433,6 +433,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
	/* This might have been changed by a reshape */
	/* This might have been changed by a reshape */
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
	sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
	sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
					   bitmap_info.space);
					   bitmap_info.space);
	kunmap_atomic(sb);
	kunmap_atomic(sb);
@@ -544,6 +545,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
	bitmap_super_t *sb;
	bitmap_super_t *sb;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long chunksize, daemon_sleep, write_behind;
	unsigned long long events;
	unsigned long long events;
	int nodes = 0;
	unsigned long sectors_reserved = 0;
	unsigned long sectors_reserved = 0;
	int err = -EINVAL;
	int err = -EINVAL;
	struct page *sb_page;
	struct page *sb_page;
@@ -583,6 +585,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
	write_behind = le32_to_cpu(sb->write_behind);
	write_behind = le32_to_cpu(sb->write_behind);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
	nodes = le32_to_cpu(sb->nodes);


	/* verify that the bitmap-specific fields are valid */
	/* verify that the bitmap-specific fields are valid */
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -643,6 +646,7 @@ out_no_sb:
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.chunksize = chunksize;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.max_write_behind = write_behind;
	bitmap->mddev->bitmap_info.nodes = nodes;
	if (bitmap->mddev->bitmap_info.space == 0 ||
	if (bitmap->mddev->bitmap_info.space == 0 ||
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
	    bitmap->mddev->bitmap_info.space > sectors_reserved)
		bitmap->mddev->bitmap_info.space = sectors_reserved;
		bitmap->mddev->bitmap_info.space = sectors_reserved;
@@ -2186,6 +2190,8 @@ __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);


static ssize_t metadata_show(struct mddev *mddev, char *page)
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
				      ? "external" : "internal"));
}
}
@@ -2198,7 +2204,8 @@ static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
		return -EBUSY;
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
		mddev->bitmap_info.external = 1;
	else if (strncmp(buf, "internal", 8) == 0)
	else if ((strncmp(buf, "internal", 8) == 0) ||
			(strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
		mddev->bitmap_info.external = 0;
	else
	else
		return -EINVAL;
		return -EINVAL;
+62 −3
Original line number Original line Diff line number Diff line
@@ -22,8 +22,16 @@ struct dlm_lock_resource {
	struct dlm_lksb lksb;
	struct dlm_lksb lksb;
	char *name; /* lock name. */
	char *name; /* lock name. */
	uint32_t flags; /* flags to pass to dlm_lock() */
	uint32_t flags; /* flags to pass to dlm_lock() */
	void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
	struct completion completion; /* completion for synchronized locking */
	struct completion completion; /* completion for synchronized locking */
	void (*bast)(void *arg, int mode); /* blocking AST function pointer*/
	struct mddev *mddev; /* pointing back to mddev. */
};

struct md_cluster_info {
	/* dlm lock space and resources for clustered raid. */
	dlm_lockspace_t *lockspace;
	struct dlm_lock_resource *sb_lock;
	struct mutex sb_mutex;
};
};


static void sync_ast(void *arg)
static void sync_ast(void *arg)
@@ -53,16 +61,18 @@ static int dlm_unlock_sync(struct dlm_lock_resource *res)
	return dlm_lock_sync(res, DLM_LOCK_NL);
	return dlm_lock_sync(res, DLM_LOCK_NL);
}
}


static struct dlm_lock_resource *lockres_init(dlm_lockspace_t *lockspace,
static struct dlm_lock_resource *lockres_init(struct mddev *mddev,
		char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
		char *name, void (*bastfn)(void *arg, int mode), int with_lvb)
{
{
	struct dlm_lock_resource *res = NULL;
	struct dlm_lock_resource *res = NULL;
	int ret, namelen;
	int ret, namelen;
	struct md_cluster_info *cinfo = mddev->cluster_info;


	res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	res = kzalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
	if (!res)
	if (!res)
		return NULL;
		return NULL;
	res->ls = lockspace;
	res->ls = cinfo->lockspace;
	res->mddev = mddev;
	namelen = strlen(name);
	namelen = strlen(name);
	res->name = kzalloc(namelen + 1, GFP_KERNEL);
	res->name = kzalloc(namelen + 1, GFP_KERNEL);
	if (!res->name) {
	if (!res->name) {
@@ -114,13 +124,62 @@ static void lockres_free(struct dlm_lock_resource *res)
	kfree(res);
	kfree(res);
}
}


/*
 * Format the 16 raw uuid bytes in @src into @dest as the canonical
 * 8-4-4-4-12 lowercase hex string (36 characters plus NUL terminator).
 * @dest must have room for at least 37 bytes.  Returns @dest.
 */
static char *pretty_uuid(char *dest, char *src)
{
	char *p = dest;
	int i;

	for (i = 0; i < 16; i++) {
		/* dash separators sit before bytes 4, 6, 8 and 10 */
		if (i == 4 || i == 6 || i == 8 || i == 10)
			*p++ = '-';
		p += sprintf(p, "%02x", (unsigned char)src[i]);
	}
	*p = '\0';
	return dest;
}

/*
 * join() - called when mddev detects it is a clustered device.
 * Allocates the per-array md_cluster_info, creates a DLM lockspace named
 * after the array uuid, and initialises the super-block lock resource.
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): several lines below appear twice; this looks like a
 * side-by-side diff rendering artifact, not real source -- confirm
 * against the actual tree before building.
 */
static int join(struct mddev *mddev, int nodes)
static int join(struct mddev *mddev, int nodes)
{
{
	struct md_cluster_info *cinfo;
	int ret;
	char str[64]; /* textual uuid: 36 chars + NUL fits comfortably */

	/* Pin this module for the lifetime of the cluster membership;
	 * dropped in the err path here and by md_cluster_stop(). */
	if (!try_module_get(THIS_MODULE))
		return -ENOENT;

	cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
	if (!cinfo)
		/* NOTE(review): returning here leaks the module reference
		 * taken by try_module_get() above -- should module_put(). */
		return -ENOMEM;

	/* The lockspace name is the array uuid in canonical text form. */
	memset(str, 0, 64);
	pretty_uuid(str, mddev->uuid);
	ret = dlm_new_lockspace(str, NULL, DLM_LSFL_FS, LVB_SIZE,
				NULL, NULL, NULL, &cinfo->lockspace);
	if (ret)
		goto err;
	cinfo->sb_lock = lockres_init(mddev, "cmd-super",
					NULL, 0);
	if (!cinfo->sb_lock) {
		ret = -ENOMEM;
		goto err;
	}
	mutex_init(&cinfo->sb_mutex);
	/* Publish the cluster info only after full setup succeeds. */
	mddev->cluster_info = cinfo;
	return 0;
	return 0;
err:
	/* lockspace may be NULL if dlm_new_lockspace() itself failed */
	if (cinfo->lockspace)
		dlm_release_lockspace(cinfo->lockspace, 2);
	kfree(cinfo);
	module_put(THIS_MODULE);
	return ret;
}
}


/*
 * leave() - tear down cluster state for @mddev: free the super-block
 * lock resource and release the DLM lockspace.  Safe to call when the
 * array never joined (cluster_info is NULL).  Always returns 0.
 *
 * NOTE(review): duplicated lines below look like a side-by-side diff
 * rendering artifact -- confirm against the actual tree.
 * NOTE(review): cinfo itself is not kfreed and mddev->cluster_info is
 * not cleared here -- verify whether that is done elsewhere or leaks.
 */
static int leave(struct mddev *mddev)
static int leave(struct mddev *mddev)
{
{
	struct md_cluster_info *cinfo = mddev->cluster_info;

	if (!cinfo)
		return 0;
	lockres_free(cinfo->sb_lock);
	dlm_release_lockspace(cinfo->lockspace, 2);
	return 0;
	return 0;
}
}


+2 −0
Original line number Original line Diff line number Diff line
@@ -7279,6 +7279,8 @@ int md_setup_cluster(struct mddev *mddev, int nodes)


/*
 * md_cluster_stop() - leave the cluster (if cluster ops are registered)
 * and drop the reference on the md-cluster module.  No-op when no
 * cluster module was ever loaded.
 *
 * NOTE(review): the doubled leave()/module_put() lines below are a
 * side-by-side diff rendering artifact; each should appear once in the
 * real source -- confirm against the actual tree.
 */
void md_cluster_stop(struct mddev *mddev)
void md_cluster_stop(struct mddev *mddev)
{
{
	if (!md_cluster_ops)
		return;
	md_cluster_ops->leave(mddev);
	md_cluster_ops->leave(mddev);
	module_put(md_cluster_mod);
	module_put(md_cluster_mod);
}
}
+8 −0
Original line number Original line Diff line number Diff line
@@ -203,6 +203,8 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
				int is_new);
				int is_new);
extern void md_ack_all_badblocks(struct badblocks *bb);
extern void md_ack_all_badblocks(struct badblocks *bb);


struct md_cluster_info;

struct mddev {
struct mddev {
	void				*private;
	void				*private;
	struct md_personality		*pers;
	struct md_personality		*pers;
@@ -431,6 +433,7 @@ struct mddev {
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		daemon_sleep; /* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
		int			external;
		int			nodes; /* Maximum number of nodes in the cluster */
	} bitmap_info;
	} bitmap_info;


	atomic_t			max_corr_read_errors; /* max read retries */
	atomic_t			max_corr_read_errors; /* max read retries */
@@ -449,6 +452,7 @@ struct mddev {
	struct work_struct flush_work;
	struct work_struct flush_work;
	struct work_struct event_work;	/* used by dm to report failure event */
	struct work_struct event_work;	/* used by dm to report failure event */
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	struct md_cluster_info		*cluster_info;
};
};


static inline int __must_check mddev_lock(struct mddev *mddev)
static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -676,4 +680,8 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
}
}


extern struct md_cluster_operations *md_cluster_ops;
extern struct md_cluster_operations *md_cluster_ops;
/*
 * An array is clustered when its cluster_info has been set up (join()
 * succeeded) and the bitmap is configured for more than one node.
 */
static inline int mddev_is_clustered(struct mddev *mddev)
{
	if (!mddev->cluster_info)
		return 0;
	return mddev->bitmap_info.nodes > 1;
}
#endif /* _MD_MD_H */
#endif /* _MD_MD_H */