
Commit 040639b7 authored by Linus Torvalds
Pull MD updates from Shaohua Li:
 "Some small fixes for MD:

   - fix raid5-cache potential problems if raid5 cache isn't fully
     recovered

   - fix a wait-within-wait warning in raid1/10

   - make raid5-PPL support disks with writeback cache enabled"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  raid5-ppl: PPL support for disks with write-back cache enabled
  md/r5cache: print more info of log recovery
  md/raid1,raid10: silence warning about wait-within-wait
  md: introduce new personality function start()
parents 20c59c71 1532d9e8
+4 −3
@@ -39,6 +39,7 @@ case the behavior is the same as in plain raid5.
PPL is available for md version-1 metadata and external (specifically IMSM)
metadata arrays. It can be enabled using mdadm option --consistency-policy=ppl.

Currently, volatile write-back cache should be disabled on all member drives
when using PPL. Otherwise it cannot guarantee consistency in case of power
failure.
PPL is limited to arrays with a maximum of 64 disks, which keeps the data
structures and implementation simple. RAID5 arrays with that many disks are
unlikely because of the high risk of multiple disk failures, so this
restriction should not be a limitation in practice.
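
For illustration only, creating an array with PPL enabled might look like the
following; the device names and member count are hypothetical, and only the
--consistency-policy=ppl option comes from the documentation text above:

	mdadm --create /dev/md0 --level=5 --raid-devices=4 \
	      --consistency-policy=ppl /dev/sda1 /dev/sdb1 /dev/sdc1 /dev/sdd1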
+9 −0
@@ -3151,6 +3151,14 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
		goto bad;
	}

	r = md_start(&rs->md);

	if (r) {
		ti->error = "Failed to start raid array";
		mddev_unlock(&rs->md);
		goto bad_md_start;
	}

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

@@ -3198,6 +3206,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	mddev_unlock(&rs->md);
	return 0;

bad_md_start:
bad_journal_mode_set:
bad_stripe_cache:
bad_check_reshape:
+23 −8
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
	return NULL;
}

static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *find_pers(int level, char *clevel)
{
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
	if (start_readonly && mddev->ro == 0)
		mddev->ro = 2; /* read-only, but switch on first write */

	/*
	 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
	 * up mddev->thread. It is important to initialize critical
	 * resources for mddev->thread BEFORE calling pers->run().
	 */
	err = pers->run(mddev);
	if (err)
		pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
	if (mddev_is_clustered(mddev))
		md_allow_write(mddev);

	/* run start up tasks that require md_thread */
	md_start(mddev);

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

@@ -5689,6 +5688,21 @@ static int do_md_run(struct mddev *mddev)
	return err;
}

int md_start(struct mddev *mddev)
{
	int ret = 0;

	if (mddev->pers->start) {
		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		ret = mddev->pers->start(mddev);
		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
		md_wakeup_thread(mddev->sync_thread);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(md_start);

static int restart_array(struct mddev *mddev)
{
	struct gendisk *disk = mddev->gendisk;
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
		return -ENODEV;

	rcu_read_lock();
	rdev = find_rdev_rcu(mddev, dev);
	rdev = md_find_rdev_rcu(mddev, dev);
	if (!rdev)
		err =  -ENODEV;
	else {
@@ -8169,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
	int ret;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
		return;
	if (mddev->ro) {/* never try to sync a read-only array */
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+9 −0
@@ -485,6 +485,7 @@ enum recovery_flags {
	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
	MD_RECOVERY_WAIT,	/* waiting for pers->start() to finish */
};

static inline int __must_check mddev_lock(struct mddev *mddev)
@@ -523,7 +524,13 @@ struct md_personality
	struct list_head list;
	struct module *owner;
	bool (*make_request)(struct mddev *mddev, struct bio *bio);
	/*
	 * Start-up work that does NOT require md_thread. Tasks that
	 * require md_thread should go into start().
	 */
	int (*run)(struct mddev *mddev);
	/* start-up work that requires md threads */
	int (*start)(struct mddev *mddev);
	void (*free)(struct mddev *mddev, void *priv);
	void (*status)(struct seq_file *seq, struct mddev *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
@@ -687,6 +694,7 @@ extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);

extern void mddev_init(struct mddev *mddev);
extern int md_run(struct mddev *mddev);
extern int md_start(struct mddev *mddev);
extern void md_stop(struct mddev *mddev);
extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
@@ -702,6 +710,7 @@ extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev);

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
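
To make the run()/start() split above concrete, here is a purely illustrative
sketch of a personality wiring up the new optional hook. The demo_* names are
hypothetical and not part of the patch; in the series itself it is raid5 that
gains a ->start(), so the r5cache/PPL log is only brought up once mddev->thread
exists:

	/* Hypothetical personality, for illustration only */
	static int demo_run(struct mddev *mddev)
	{
		/* allocate conf, validate geometry: nothing here may rely on
		 * mddev->thread being active yet */
		return 0;
	}

	static int demo_start(struct mddev *mddev)
	{
		/* called via md_start(); md_thread-dependent work such as log
		 * recovery can safely be kicked off from here */
		return 0;
	}

	static struct md_personality demo_personality = {
		.name  = "demo",
		.owner = THIS_MODULE,
		.run   = demo_run,
		.start = demo_start,	/* new optional hook introduced by this series */
	};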
+11 −0
@@ -815,6 +815,17 @@ static void flush_pending_writes(struct r1conf *conf)
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);

		/*
		 * As this is called in a wait_event() loop (see freeze_array),
		 * current->state might be TASK_UNINTERRUPTIBLE which will
		 * cause a warning when we prepare to wait again.  As it is
		 * rare that this path is taken, it is perfectly safe to force
		 * us to go around the wait_event() loop again, so the warning
		 * is a false-positive.  Silence the warning by resetting
		 * thread state
		 */
		__set_current_state(TASK_RUNNING);
		blk_start_plug(&plug);
		flush_bio_list(conf, bio);
		blk_finish_plug(&plug);
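
For context on how this path ends up running with current->state already set:
flush_pending_writes() is invoked as the cmd argument of a
wait_event_lock_irq_cmd() loop in freeze_array(), roughly as sketched below.
This is reconstructed from the raid1 code of this era and is not part of the
diff:

	/* Sketch of the caller in freeze_array() (raid1.c).  The wait macro puts
	 * the task into TASK_UNINTERRUPTIBLE before evaluating the condition and
	 * running the cmd, so the plug/flush in flush_pending_writes() would
	 * otherwise trip the "do not call blocking ops when !TASK_RUNNING"
	 * warning. */
	wait_event_lock_irq_cmd(conf->wait_barrier,
				get_unqueued_pending(conf) == extra,
				conf->resync_lock,
				flush_pending_writes(conf));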