Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da9cf505 authored by NeilBrown
Browse files

md: avoid spinlock problem in blk_throtl_exit



blk_throtl_exit assumes that ->queue_lock still exists,
so make sure that it does.
To do this, we stop redirecting ->queue_lock to conf->device_lock
and leave it pointing where it is initialised - __queue_lock.

As the blk_plug functions check the ->queue_lock is held, we now
take that spin_lock explicitly around the plug functions.  We don't
need the locking, just the warning removal.

This is needed for any kernel with the blk_throtl code,
which is 2.6.37 and later.

Cc: stable@kernel.org
Signed-off-by: NeilBrown <neilb@suse.de>
parent 8f5f02c4
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)

	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
	conf = linear_conf(mddev, mddev->raid_disks);

	if (!conf)
+0 −1
Original line number Diff line number Diff line
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_stop()]
	 */
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
	mddev->private = conf;
+0 −1
Original line number Diff line number Diff line
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
	if (md_check_no_bitmap(mddev))
		return -EINVAL;
	blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
+4 −2
Original line number Diff line number Diff line
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		/* Only take the spinlock to quiet a warning */
		spin_lock(conf->mddev->queue->queue_lock);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock(conf->mddev->queue->queue_lock);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to
		 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
		atomic_inc(&r1_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		blk_plug_device(mddev->queue);
		blk_plug_device_unlocked(mddev->queue);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}
	r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	mddev->queue->queue_lock = &conf->device_lock;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
+4 −3
Original line number Diff line number Diff line
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		/* Spinlock only taken to quiet a warning */
		spin_lock(conf->mddev->queue->queue_lock);
		blk_remove_plug(conf->mddev->queue);
		spin_unlock(conf->mddev->queue->queue_lock);
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
		atomic_inc(&r10_bio->remaining);
		spin_lock_irqsave(&conf->device_lock, flags);
		bio_list_add(&conf->pending_bio_list, mbio);
		blk_plug_device(mddev->queue);
		blk_plug_device_unlocked(mddev->queue);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	}

@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
	if (!conf)
		goto out;

	mddev->queue->queue_lock = &conf->device_lock;

	mddev->thread = conf->thread;
	conf->thread = NULL;

Loading