Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 24f0a487 authored by Linus Torvalds
Browse files

Merge tag 'for-linus-20190215' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Ensure we insert into the hctx dispatch list, if a request is marked
   as DONTPREP (Jianchao)

 - NVMe pull request, single missing unlock on error fix (Keith)

 - MD pull request, single fix for a potentially data corrupting issue
   (Nate)

 - Floppy check_events regression fix (Yufen)

* tag 'for-linus-20190215' of git://git.kernel.dk/linux-block:
  md/raid1: don't clear bitmap bits on interrupted recovery.
  floppy: check_events callback should not return a negative number
  nvme-pci: add missing unlock for reset error
  blk-mq: insert rq with DONTPREP to hctx dispatch list when requeue
parents ae3fa8bd ace74f73
Loading
Loading
Loading
Loading
+10 −2
Original line number Diff line number Diff line
@@ -737,11 +737,19 @@ static void blk_mq_requeue_work(struct work_struct *work)
	spin_unlock_irq(&q->requeue_lock);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		if (!(rq->rq_flags & RQF_SOFTBARRIER))
		if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP)))
			continue;

		rq->rq_flags &= ~RQF_SOFTBARRIER;
		list_del_init(&rq->queuelist);
		/*
		 * If RQF_DONTPREP, rq has contained some driver specific
		 * data, so insert it to hctx dispatch list to avoid any
		 * merge.
		 */
		if (rq->rq_flags & RQF_DONTPREP)
			blk_mq_request_bypass_insert(rq, false);
		else
			blk_mq_sched_insert_request(rq, true, false, false);
	}

+1 −1
Original line number Diff line number Diff line
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk,

	if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
		if (lock_fdc(drive))
			return -EINTR;
			return 0;
		poll_drive(false, 0);
		process_fd_request();
	}
+18 −10
Original line number Diff line number Diff line
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
		reschedule_retry(r1_bio);
}

/*
 * abort_sync_write - preserve bitmap bits for an interrupted sync write.
 * @mddev:  md device whose in-memory bitmap is being updated
 * @r1_bio: the resync/recovery request being aborted
 *
 * Walks the range [r1_bio->sector, r1_bio->sector + r1_bio->sectors)
 * chunk by chunk, calling md_bitmap_end_sync() for each step so the
 * corresponding bitmap bits are not cleared and the region is re-synced
 * later (see "md/raid1: don't clear bitmap bits on interrupted recovery").
 * NOTE(review): the final argument 1 presumably flags the sync as
 * aborted — confirm against the md_bitmap_end_sync() definition.
 */
static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
{
	sector_t sync_blocks = 0;	/* out-param: sectors covered per call */
	sector_t s = r1_bio->sector;
	long sectors_to_go = r1_bio->sectors;

	/* make sure these bits don't get cleared. */
	do {
		/* md_bitmap_end_sync() writes the chunk size into sync_blocks */
		md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
		s += sync_blocks;
		sectors_to_go -= sync_blocks;
	} while (sectors_to_go > 0);
}

static void end_sync_write(struct bio *bio)
{
	int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
	struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;

	if (!uptodate) {
		sector_t sync_blocks = 0;
		sector_t s = r1_bio->sector;
		long sectors_to_go = r1_bio->sectors;
		/* make sure these bits doesn't get cleared. */
		do {
			md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
			s += sync_blocks;
			sectors_to_go -= sync_blocks;
		} while (sectors_to_go > 0);
		abort_sync_write(mddev, r1_bio);
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
		     (i == r1_bio->read_disk ||
		      !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
			continue;
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
		if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
			abort_sync_write(mddev, r1_bio);
			continue;
		}

		bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
		if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
+5 −3
Original line number Diff line number Diff line
@@ -2560,15 +2560,15 @@ static void nvme_reset_work(struct work_struct *work)
	mutex_lock(&dev->shutdown_lock);
	result = nvme_pci_enable(dev);
	if (result)
		goto out;
		goto out_unlock;

	result = nvme_pci_configure_admin_queue(dev);
	if (result)
		goto out;
		goto out_unlock;

	result = nvme_alloc_admin_tags(dev);
	if (result)
		goto out;
		goto out_unlock;

	/*
	 * Limit the max command size to prevent iod->sg allocations going
@@ -2651,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work)
	nvme_start_ctrl(&dev->ctrl);
	return;

 out_unlock:
	mutex_unlock(&dev->shutdown_lock);
 out:
	nvme_remove_dead_ctrl(dev, result);
}