Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0529613a authored by Neil Brown
Browse files

Merge branch 'for-neil' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/md into for-next

parents 5b1a4bf2 b5470dc5
Loading
Loading
Loading
Loading
+16 −11
Original line number Diff line number Diff line
@@ -4172,9 +4172,11 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
	char *ptr, *buf = NULL;
	int err = -ENOMEM;

	md_allow_write(mddev);

	if (md_allow_write(mddev))
		file = kmalloc(sizeof(*file), GFP_NOIO);
	else
		file = kmalloc(sizeof(*file), GFP_KERNEL);

	if (!file)
		goto out;

@@ -5667,15 +5669,18 @@ void md_write_end(mddev_t *mddev)
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
void md_allow_write(mddev_t *mddev)
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return;
		return 0;
	if (mddev->ro)
		return;
		return 0;
	if (!mddev->pers->sync_request)
		return;
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
@@ -5686,14 +5691,14 @@ void md_allow_write(mddev_t *mddev)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);

		sysfs_notify(&mddev->kobj, NULL, "array_state");
		/* wait for the dirty state to be recorded in the metadata */
		wait_event(mddev->sb_wait,
			   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
			   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);

+4 −2
Original line number Diff line number Diff line
@@ -2136,7 +2136,7 @@ static int raid1_reshape(mddev_t *mddev)
	conf_t *conf = mddev_to_conf(mddev);
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_size != mddev->new_chunk ||
@@ -2148,7 +2148,9 @@ static int raid1_reshape(mddev_t *mddev)
		return -EINVAL;
	}

	md_allow_write(mddev);
	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

+9 −3
Original line number Diff line number Diff line
@@ -911,14 +911,16 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	md_allow_write(conf->mddev);
	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
@@ -3843,6 +3845,8 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
	raid5_conf_t *conf = mddev_to_conf(mddev);
	unsigned long new;
	int err;

	if (len >= PAGE_SIZE)
		return -EINVAL;
	if (!conf)
@@ -3858,7 +3862,9 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
		else
			break;
	}
	md_allow_write(mddev);
	err = md_allow_write(mddev);
	if (err)
		return err;
	while (new > conf->max_nr_stripes) {
		if (grow_one_stripe(conf))
			conf->max_nr_stripes++;
+1 −1
Original line number Diff line number Diff line
@@ -95,7 +95,7 @@ extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
			struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern void md_allow_write(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);

#endif /* CONFIG_MD */