
Commit bd3813d5 authored by Linus Torvalds

Merge tag 'md/3.14-fixes' of git://neil.brown.name/md

Pull md fixes from Neil Brown:
 "Two bugfixes for md

  both tagged for -stable"

* tag 'md/3.14-fixes' of git://neil.brown.name/md:
  md/raid5: Fix CPU hotplug callback registration
  md/raid1: restore ability for check and repair to fix read errors.
Parents: c1b8ae03 789b5e03
drivers/md/raid1.c (+10 −3)
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
 		int size;
+		int uptodate;
 		struct bio *b = r1_bio->bios[i];
 		if (b->bi_end_io != end_sync_read)
 			continue;
-		/* fixup the bio for reuse */
+		/* fixup the bio for reuse, but preserve BIO_UPTODATE */
+		uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
 		bio_reset(b);
+		if (!uptodate)
+			clear_bit(BIO_UPTODATE, &b->bi_flags);
 		b->bi_vcnt = vcnt;
 		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
 		int j;
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
+		int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
 
 		if (sbio->bi_end_io != end_sync_read)
 			continue;
+		/* Now we can 'fixup' the BIO_UPTODATE flag */
+		set_bit(BIO_UPTODATE, &sbio->bi_flags);
 
-		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+		if (uptodate) {
 			for (j = vcnt; j-- ; ) {
 				struct page *p, *s;
 				p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
 		if (j >= 0)
 			atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
 		if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
-			      && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+			      && uptodate)) {
 			/* No need to write to this device. */
 			sbio->bi_end_io = NULL;
 			rdev_dec_pending(conf->mirrors[i].rdev, mddev);
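Note on the raid1 change: as the first hunk implies (the new code clears BIO_UPTODATE again right after the reset when the read had failed), bio_reset() leaves the bio looking up to date, so resetting the sync-read bios in process_checks() used to wipe out the record of which reads had actually failed, and a "check" or "repair" pass could no longer notice and fix read errors. The patch snapshots BIO_UPTODATE before the reset, re-clears it afterwards for failed reads, and then uses the saved value when deciding whether to compare pages and whether a device needs rewriting. The stand-alone sketch below (made-up struct and helper names, plain userspace C, not kernel code) models that save-and-restore-around-reset pattern:

#include <stdio.h>
#include <stdbool.h>

#define FAKE_UPTODATE (1u << 0)

struct fake_bio {
	unsigned int flags;
	unsigned int vcnt;
};

/* Models bio_reset(): wipes the bio and leaves the "uptodate" bit set. */
static void fake_bio_reset(struct fake_bio *b)
{
	b->vcnt = 0;
	b->flags = FAKE_UPTODATE;
}

/* Models the fixed path in process_checks(): preserve the flag across reset. */
static void reuse_bio(struct fake_bio *b)
{
	bool uptodate = b->flags & FAKE_UPTODATE;

	fake_bio_reset(b);
	if (!uptodate)
		b->flags &= ~FAKE_UPTODATE;
}

int main(void)
{
	struct fake_bio failed_read = { .flags = 0, .vcnt = 4 };

	reuse_bio(&failed_read);
	/* Without the snapshot, the reset would have hidden the read error. */
	printf("uptodate after reuse: %u\n", failed_read.flags & FAKE_UPTODATE);
	return 0;
}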
drivers/md/raid5.c (+44 −46)
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 	return sectors * (raid_disks - conf->max_degraded);
 }
 
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	safe_put_page(percpu->spare_page);
+	kfree(percpu->scribble);
+	percpu->spare_page = NULL;
+	percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+	if (conf->level == 6 && !percpu->spare_page)
+		percpu->spare_page = alloc_page(GFP_KERNEL);
+	if (!percpu->scribble)
+		percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+	if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+		free_scratch_buffer(conf, percpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 static void raid5_free_percpu(struct r5conf *conf)
 {
-	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
 	if (!conf->percpu)
 		return;
 
-	get_online_cpus();
-	for_each_possible_cpu(cpu) {
-		percpu = per_cpu_ptr(conf->percpu, cpu);
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	unregister_cpu_notifier(&conf->cpu_notify);
 #endif
+
+	get_online_cpus();
+	for_each_possible_cpu(cpu)
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 	put_online_cpus();
 
 	free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (conf->level == 6 && !percpu->spare_page)
-			percpu->spare_page = alloc_page(GFP_KERNEL);
-		if (!percpu->scribble)
-			percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
-		if (!percpu->scribble ||
-		    (conf->level == 6 && !percpu->spare_page)) {
-			safe_put_page(percpu->spare_page);
-			kfree(percpu->scribble);
+		if (alloc_scratch_buffer(conf, percpu)) {
 			pr_err("%s: failed memory allocation for cpu%ld\n",
 			       __func__, cpu);
 			return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		safe_put_page(percpu->spare_page);
-		kfree(percpu->scribble);
-		percpu->spare_page = NULL;
-		percpu->scribble = NULL;
+		free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
 		break;
 	default:
 		break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
 static int raid5_alloc_percpu(struct r5conf *conf)
 {
 	unsigned long cpu;
-	struct page *spare_page;
-	struct raid5_percpu __percpu *allcpus;
-	void *scribble;
-	int err;
+	int err = 0;
 
-	allcpus = alloc_percpu(struct raid5_percpu);
-	if (!allcpus)
+	conf->percpu = alloc_percpu(struct raid5_percpu);
+	if (!conf->percpu)
 		return -ENOMEM;
-	conf->percpu = allcpus;
 
-	get_online_cpus();
-	err = 0;
-	for_each_present_cpu(cpu) {
-		if (conf->level == 6) {
-			spare_page = alloc_page(GFP_KERNEL);
-			if (!spare_page) {
-				err = -ENOMEM;
-				break;
-			}
-			per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
-		}
-		scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-		if (!scribble) {
-			err = -ENOMEM;
-			break;
-		}
-		per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	conf->cpu_notify.notifier_call = raid456_cpu_notify;
 	conf->cpu_notify.priority = 0;
-	if (err == 0)
 	err = register_cpu_notifier(&conf->cpu_notify);
+	if (err)
+		return err;
 #endif
+
+	get_online_cpus();
+	for_each_present_cpu(cpu) {
+		err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+		if (err) {
+			pr_err("%s: failed memory allocation for cpu%ld\n",
+			       __func__, cpu);
+			break;
+		}
+	}
 	put_online_cpus();
 
 	return err;
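Note on the raid5 change: the per-CPU scratch setup (spare_page and scribble) and its teardown were open-coded in three places, and, as the old hunks show, raid5_alloc_percpu() registered its CPU notifier only after the setup loop, from inside the get_online_cpus()/put_online_cpus() section, which is the registration pattern the commit subject refers to. The patch factors the duplicated code into alloc_scratch_buffer()/free_scratch_buffer(), registers the notifier before taking get_online_cpus() and walking the present CPUs, and has raid5_free_percpu() unregister before freeing. The stand-alone sketch below (made-up names, plain userspace C, no real hotplug machinery) models those two ideas: shared alloc/free helpers plus register-before-populate and unregister-before-free ordering.

#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4			/* stands in for the possible CPUs */

struct scratch {
	void *scribble;			/* stands in for percpu->scribble */
};

static struct scratch slots[NSLOTS];
static int (*hotplug_cb)(int slot);	/* stands in for the cpu notifier */

/* Shared teardown helper, in the spirit of free_scratch_buffer(). */
static void free_scratch(struct scratch *s)
{
	free(s->scribble);
	s->scribble = NULL;
}

/* Shared setup helper, in the spirit of alloc_scratch_buffer(); idempotent. */
static int alloc_scratch(struct scratch *s)
{
	if (!s->scribble)
		s->scribble = malloc(64);
	if (!s->scribble) {
		free_scratch(s);
		return -1;
	}
	return 0;
}

/* Callback invoked when a new slot ("CPU") comes up. */
static int on_slot_up(int slot)
{
	return alloc_scratch(&slots[slot]);
}

static int setup(void)
{
	int i;

	/* Register the callback first, then populate the slots that already
	 * exist, mirroring the order in the fixed raid5_alloc_percpu(). */
	hotplug_cb = on_slot_up;
	for (i = 0; i < NSLOTS; i++)
		if (alloc_scratch(&slots[i]))
			return -1;
	return 0;
}

static void teardown(void)
{
	int i;

	/* Unregister before freeing, mirroring raid5_free_percpu(). */
	hotplug_cb = NULL;
	for (i = 0; i < NSLOTS; i++)
		free_scratch(&slots[i]);
}

int main(void)
{
	if (setup() != 0) {
		teardown();
		return 1;
	}
	/* Simulate one hotplug event through the registered callback. */
	if (hotplug_cb && hotplug_cb(2) == 0)
		printf("scratch buffers ready for %d slots\n", NSLOTS);
	teardown();
	return 0;
}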