Commit 7768ee3f authored by Linus Torvalds
Pull MD fixes from Shaohua Li:
 "Three small fixes for MD:

   - md-cluster fix for faulty device from Guoqing

   - writehint fix for writebehind IO for raid1 from Mariusz

   - a live lock fix for interrupted recovery from Yufen"

* tag 'md/4.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  raid1: copy write hint from master bio to behind bio
  md/raid1: exit sync request if MD_RECOVERY_INTR is set
  md-cluster: don't update recovery_offset for faulty device
parents a9e5b732 dba40d46
drivers/md/md.c  +4 −2
@@ -9256,9 +9256,11 @@ void md_reload_sb(struct mddev *mddev, int nr)
 	check_sb_changes(mddev, rdev);
 
 	/* Read all rdev's to update recovery_offset */
-	rdev_for_each_rcu(rdev, mddev)
-		read_rdev(mddev, rdev);
+	rdev_for_each_rcu(rdev, mddev) {
+		if (!test_bit(Faulty, &rdev->flags))
+			read_rdev(mddev, rdev);
+	}
 }
 EXPORT_SYMBOL(md_reload_sb);
 
 #ifndef MODULE
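
The hunk above is the md-cluster fix: a Faulty member's superblock is no longer re-read, so its recovery_offset is not clobbered while other cluster nodes may still depend on it. A toy userspace model of that guard, with hypothetical names (toy_rdev, toy_test_bit, reload_all) standing in for the kernel's rdev machinery:

#include <stdbool.h>
#include <stdio.h>

enum { Faulty = 0 };   /* bit index, mirroring md's rdev flag name */

struct toy_rdev {
	unsigned long flags;
	long long recovery_offset;
};

static bool toy_test_bit(int bit, const unsigned long *flags)
{
	return *flags & (1UL << bit);
}

static void reload_all(struct toy_rdev *rdevs, int n)
{
	for (int i = 0; i < n; i++) {
		/* Skip Faulty members: re-reading their superblock would
		 * overwrite the recovery_offset other nodes rely on. */
		if (!toy_test_bit(Faulty, &rdevs[i].flags))
			rdevs[i].recovery_offset = 0; /* stand-in for read_rdev() */
	}
}

int main(void)
{
	struct toy_rdev devs[] = {
		{ .flags = 1UL << Faulty, .recovery_offset = 1024 },
		{ .flags = 0,             .recovery_offset = 2048 },
	};

	reload_all(devs, 2);
	/* The faulty device keeps its old offset; the healthy one is refreshed. */
	printf("faulty: %lld, healthy: %lld\n",
	       devs[0].recovery_offset, devs[1].recovery_offset);
	return 0;
}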
drivers/md/raid1.c  +20 −5
@@ -854,7 +854,7 @@ static void flush_pending_writes(struct r1conf *conf)
  *    there is no normal IO happeing.  It must arrange to call
  *    lower_barrier when the particular background IO completes.
  */
-static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
+static sector_t raise_barrier(struct r1conf *conf, sector_t sector_nr)
 {
 	int idx = sector_to_idx(sector_nr);
 
@@ -885,13 +885,23 @@ static void raise_barrier(struct r1conf *conf, sector_t sector_nr)
 	 *    max resync count which allowed on current I/O barrier bucket.
 	 */
 	wait_event_lock_irq(conf->wait_barrier,
-			    !conf->array_frozen &&
+			    (!conf->array_frozen &&
 			     !atomic_read(&conf->nr_pending[idx]) &&
-			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH,
+			     atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
+				test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
 			    conf->resync_lock);
 
+	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+		atomic_dec(&conf->barrier[idx]);
+		spin_unlock_irq(&conf->resync_lock);
+		wake_up(&conf->wait_barrier);
+		return -EINTR;
+	}
+
 	atomic_inc(&conf->nr_sync_pending);
 	spin_unlock_irq(&conf->resync_lock);
+
+	return 0;
 }
 
 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
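
This hunk is the livelock fix: MD_RECOVERY_INTR becomes part of raise_barrier()'s wake-up condition, and a woken sleeper backs out (dropping the barrier count it took) and returns -EINTR instead of waiting forever for pending IO that will never drain once recovery is being torn down. The caller-side handling is in the raid1_sync_request hunk further down. A minimal userspace sketch of the same pattern using pthreads, with all names (barrier_ctx, raise_barrier_interruptible, interrupt_recovery) hypothetical rather than kernel API:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct barrier_ctx {
	pthread_mutex_t lock;
	pthread_cond_t wait_barrier;
	int nr_pending;        /* in-flight normal IO, by analogy */
	bool recovery_intr;    /* analogue of MD_RECOVERY_INTR */
};

/* Returns 0 when the barrier is raised, -EINTR when recovery was aborted. */
static int raise_barrier_interruptible(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	/* Wake on either "safe to proceed" or "asked to stop";
	 * without the second clause this wait can livelock. */
	while (c->nr_pending > 0 && !c->recovery_intr)
		pthread_cond_wait(&c->wait_barrier, &c->lock);

	if (c->recovery_intr) {
		pthread_mutex_unlock(&c->lock);
		return -EINTR;
	}
	pthread_mutex_unlock(&c->lock);
	return 0;
}

static void interrupt_recovery(struct barrier_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->recovery_intr = true;                  /* like setting MD_RECOVERY_INTR */
	pthread_cond_broadcast(&c->wait_barrier); /* plus the wake_up() */
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct barrier_ctx c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_barrier = PTHREAD_COND_INITIALIZER,
		.nr_pending = 1,   /* normal IO never drains in this demo */
	};

	interrupt_recovery(&c);  /* without this, the wait below never returns */
	printf("raise_barrier: %d\n", raise_barrier_interruptible(&c));
	return 0;
}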
@@ -1092,6 +1102,8 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
 		goto skip_copy;
 	}
 
+	behind_bio->bi_write_hint = bio->bi_write_hint;
+
 	while (i < vcnt && size) {
 		struct page *page;
 		int len = min_t(int, PAGE_SIZE, size);
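
For context on bi_write_hint: userspace attaches a write-lifetime hint to an open file with fcntl(F_SET_RW_HINT) (Linux 4.13+), the block layer carries it in each bio, and the one-line fix above makes the raid1 write-behind clone inherit it from the master bio instead of losing it. A small sketch of setting such a hint, falling back to raw constants in case the C library headers lack them:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_SET_RW_HINT
#define F_SET_RW_HINT 1036           /* F_LINUX_SPECIFIC_BASE + 12 */
#endif
#ifndef RWH_WRITE_LIFE_SHORT
#define RWH_WRITE_LIFE_SHORT 2
#endif

int main(void)
{
	uint64_t hint = RWH_WRITE_LIFE_SHORT;
	int fd = open("hinted.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel records this hint on the inode and copies it into each
	 * bio's bi_write_hint; the patch above propagates it to behind bios. */
	if (fcntl(fd, F_SET_RW_HINT, &hint) == -1)
		perror("F_SET_RW_HINT");  /* pre-4.13 kernels return EINVAL */
	if (write(fd, "data", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}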
@@ -2662,9 +2674,12 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 
 	bitmap_cond_end_sync(mddev->bitmap, sector_nr,
 		mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
-	r1_bio = raid1_alloc_init_r1buf(conf);
 
-	raise_barrier(conf, sector_nr);
+
+	if (raise_barrier(conf, sector_nr))
+		return 0;
+
+	r1_bio = raid1_alloc_init_r1buf(conf);
 
 	rcu_read_lock();
 	/*