Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 80a0d644 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Small collection of fixes that would be nice to have in -rc1. This
  contains:

   - NVMe pull request from Christoph, mostly with fixes for nvme-pci,
     host memory buffer in particular.

   - Error handling fixup for cgwb_create(), in case allocation of 'wb'
     fails. From Christophe Jaillet.

   - Ensure that trace_block_getrq() gets the 'dev' in an appropriate
     fashion, to avoid a potential NULL deref. From Greg Thelen.

   - Regression fix for dm-mq with blk-mq, fixing a problem with
     stacking IO schedulers. From me.

   - string.h fixup, fixing an issue with memcpy_and_pad(). This
     original change came in through an NVMe dependency, which is why
     I'm including it here. From Martin Wilck.

   - Fix potential int overflow in __blkdev_sectors_to_bio_pages(), from
     Mikulas.

   - MBR enable fix for sed-opal, from Scott"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: directly insert blk-mq request from blk_insert_cloned_request()
  mm/backing-dev.c: fix an error handling path in 'cgwb_create()'
  string.h: un-fortify memcpy_and_pad
  nvme-pci: implement the HMB entry number and size limitations
  nvme-pci: propagate (some) errors from host memory buffer setup
  nvme-pci: use appropriate initial chunk size for HMB allocation
  nvme-pci: fix host memory buffer allocation fallback
  nvme: fix lightnvm check
  block: fix integer overflow in __blkdev_sectors_to_bio_pages()
  block: sed-opal: Set MBRDone on S3 resume path if TPER is MBREnabled
  block: tolerate tracing of NULL bio
parents 20e52ee5 157f377b
Loading
Loading
Loading
Loading
+6 −1
Original line number Original line Diff line number Diff line
@@ -2342,7 +2342,12 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
	if (q->mq_ops) {
	if (q->mq_ops) {
		if (blk_queue_io_stat(q))
		if (blk_queue_io_stat(q))
			blk_account_io_start(rq, true);
			blk_account_io_start(rq, true);
		blk_mq_sched_insert_request(rq, false, true, false, false);
		/*
		 * Since we have a scheduler attached on the top device,
		 * bypass a potential scheduler on the bottom device for
		 * insert.
		 */
		blk_mq_request_bypass_insert(rq);
		return BLK_STS_OK;
		return BLK_STS_OK;
	}
	}


+2 −2
Original line number Original line Diff line number Diff line
@@ -269,9 +269,9 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
 */
 */
/*
 * Convert a sector count into the number of pages needed by one bio,
 * capped at BIO_MAX_PAGES.
 *
 * The previous form computed an intermediate byte count as
 * (nr_sects << 9) + PAGE_SIZE - 1, which can overflow sector_t for very
 * large ranges.  Dividing directly in sector units via
 * DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512) rounds up without
 * ever forming the byte value, so no overflow is possible.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	/* Sectors per page is PAGE_SIZE / 512; round up to whole pages. */
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	/* A single bio cannot hold more than BIO_MAX_PAGES pages. */
	return min(pages, (sector_t)BIO_MAX_PAGES);
}


/**
/**
+16 −0
Original line number Original line Diff line number Diff line
@@ -1401,6 +1401,22 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
	blk_mq_hctx_mark_pending(hctx, ctx);
	blk_mq_hctx_mark_pending(hctx, ctx);
}
}


/*
 * Should only be used carefully, when the caller knows we want to
 * bypass a potential IO scheduler on the target device.
 *
 * Puts @rq straight onto the hardware queue's dispatch list instead of
 * going through blk_mq_sched_insert_request(), so any elevator attached
 * to the target queue never sees the request.
 */
void blk_mq_request_bypass_insert(struct request *rq)
{
	/* Resolve the hardware context from the request's software ctx/CPU. */
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);

	/* hctx->lock protects the dispatch list. */
	spin_lock(&hctx->lock);
	list_add_tail(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	/* Kick the hardware queue so the request gets dispatched. */
	blk_mq_run_hw_queue(hctx, false);
}

void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
			    struct list_head *list)
			    struct list_head *list)


+1 −0
Original line number Original line Diff line number Diff line
@@ -54,6 +54,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
 */
 */
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
				bool at_head);
				bool at_head);
void blk_mq_request_bypass_insert(struct request *rq);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);
				struct list_head *list);


+1 −0
Original line number Original line Diff line number Diff line
@@ -46,6 +46,7 @@ enum opal_response_token {
#define GENERIC_HOST_SESSION_NUM 0x41
#define GENERIC_HOST_SESSION_NUM 0x41


#define TPER_SYNC_SUPPORTED 0x01
#define TPER_SYNC_SUPPORTED 0x01
#define MBR_ENABLED_MASK 0x10


#define TINY_ATOM_DATA_MASK 0x3F
#define TINY_ATOM_DATA_MASK 0x3F
#define TINY_ATOM_SIGNED 0x40
#define TINY_ATOM_SIGNED 0x40
Loading