
Commit 501d9f79 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Unfortunately a few issues that warrant sending another pull request,
  even if I had hoped to avoid it. This contains:

   - A fix for multiqueue xen-blkback, on tear down / disconnect.

   - A few fixups for NVMe, including a wrong bit definition, fix for
     host memory buffers, and an nvme rdma page size fix"

* 'for-linus' of git://git.kernel.dk/linux-block:
  nvme: fix the definition of the doorbell buffer config support bit
  nvme-pci: use dma memory for the host memory buffer descriptors
  nvme-rdma: default MR page size to 4k
  xen-blkback: stop blkback thread of every queue in xen_blkif_disconnect
parents 73adb8c5 7ef10f3c
drivers/block/xen-blkback/xenbus.c +8 −2
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
+	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
-		if (atomic_read(&ring->inflight) > 0)
-			return -EBUSY;
+		if (atomic_read(&ring->inflight) > 0) {
+			busy = true;
+			continue;
+		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
+	if (busy)
+		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it in
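
The xen-blkback hunks above replace an early return -EBUSY inside the per-ring loop with a deferred flag: every ring is now visited, rings that have already drained are torn down immediately, and the busy status is reported once after the loop. Below is a minimal sketch of that flag-and-continue teardown pattern; the ring struct and teardown_ring() helper are hypothetical stand-ins, not the driver's actual types.

#include <stdbool.h>
#include <errno.h>

/* Hypothetical stand-ins for the driver's per-queue state. */
struct ring {
	int inflight;		/* requests still in flight on this queue */
	bool active;
};

void teardown_ring(struct ring *r)
{
	/* unbind IRQs, unmap grants, free pending requests, ... */
	r->active = false;
}

/*
 * Flag-and-continue teardown: visit every ring so queues that have
 * already drained are cleaned up now, and report -EBUSY once at the
 * end instead of bailing out at the first busy queue.
 */
int disconnect_all(struct ring *rings, unsigned int nr_rings)
{
	bool busy = false;
	unsigned int i;

	for (i = 0; i < nr_rings; i++) {
		if (rings[i].inflight > 0) {
			busy = true;	/* leave it for a later retry */
			continue;
		}
		if (rings[i].active)
			teardown_ring(&rings[i]);
	}

	return busy ? -EBUSY : 0;
}

With the old early return, a busy queue near the front of the array prevented later, already-idle queues from being cleaned up until a retry happened to find every queue drained.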
drivers/nvme/host/pci.c +11 −11
@@ -109,6 +109,7 @@ struct nvme_dev {
	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
+	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
};
@@ -1565,16 +1566,10 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)

static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
{
-	size_t len = dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs);
+	u64 dma_addr = dev->host_mem_descs_dma;
	struct nvme_command c;
-	u64 dma_addr;
	int ret;

-	dma_addr = dma_map_single(dev->dev, dev->host_mem_descs, len,
-			DMA_TO_DEVICE);
-	if (dma_mapping_error(dev->dev, dma_addr))
-		return -ENOMEM;
-
	memset(&c, 0, sizeof(c));
	c.features.opcode	= nvme_admin_set_features;
	c.features.fid		= cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
@@ -1591,7 +1586,6 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
			 "failed to set host mem (err %d, flags %#x).\n",
			 ret, bits);
	}
-	dma_unmap_single(dev->dev, dma_addr, len, DMA_TO_DEVICE);
	return ret;
}

@@ -1609,7 +1603,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)

	kfree(dev->host_mem_desc_bufs);
	dev->host_mem_desc_bufs = NULL;
-	kfree(dev->host_mem_descs);
+	dma_free_coherent(dev->dev,
+			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+			dev->host_mem_descs, dev->host_mem_descs_dma);
	dev->host_mem_descs = NULL;
}

@@ -1617,6 +1613,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
{
	struct nvme_host_mem_buf_desc *descs;
	u32 chunk_size, max_entries, len;
+	dma_addr_t descs_dma;
	int i = 0;
	void **bufs;
	u64 size = 0, tmp;
@@ -1627,7 +1624,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
	tmp = (preferred + chunk_size - 1);
	do_div(tmp, chunk_size);
	max_entries = tmp;
-	descs = kcalloc(max_entries, sizeof(*descs), GFP_KERNEL);
+	descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+			&descs_dma, GFP_KERNEL);
	if (!descs)
		goto out;

@@ -1661,6 +1659,7 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
	dev->nr_host_mem_descs = i;
	dev->host_mem_size = size;
	dev->host_mem_descs = descs;
+	dev->host_mem_descs_dma = descs_dma;
	dev->host_mem_desc_bufs = bufs;
	return 0;

@@ -1674,7 +1673,8 @@ static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)

	kfree(bufs);
out_free_descs:
-	kfree(descs);
+	dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+			descs_dma);
out:
	/* try a smaller chunk size if we failed early */
	if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
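
Taken together, the pci.c hunks change the host memory buffer descriptor list from a kcalloc() allocation that was dma_map_single()-mapped around each Set Features command to one dma_zalloc_coherent() allocation whose bus address is kept in dev->host_mem_descs_dma. The descriptor list must remain device-accessible for as long as the host memory buffer is enabled, so mapping it only for the duration of a single admin command is not sufficient. A condensed sketch of the resulting lifecycle follows, assuming a simplified device struct; my_dev and the helper names are hypothetical, while dma_zalloc_coherent()/dma_free_coherent() are the kernel APIs the patch actually uses.

#include <linux/dma-mapping.h>
#include <linux/nvme.h>

/* Simplified stand-in for the relevant parts of struct nvme_dev. */
struct my_dev {
	struct device *dev;
	struct nvme_host_mem_buf_desc *descs;
	dma_addr_t descs_dma;	/* bus address, valid while the HMB is enabled */
	u32 nr_descs;
};

int hmb_alloc_descs(struct my_dev *d, u32 max_entries)
{
	/* One coherent, zeroed allocation: mapped for the device until freed. */
	d->descs = dma_zalloc_coherent(d->dev,
			max_entries * sizeof(*d->descs),
			&d->descs_dma, GFP_KERNEL);
	if (!d->descs)
		return -ENOMEM;
	d->nr_descs = max_entries;	/* remember the size for the free */
	return 0;
}

void hmb_free_descs(struct my_dev *d)
{
	dma_free_coherent(d->dev, d->nr_descs * sizeof(*d->descs),
			d->descs, d->descs_dma);
	d->descs = NULL;
}

The Set Features path then only reads the stored bus address, so there is no per-command mapping left to fail or leak.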
drivers/nvme/host/rdma.c +6 −2
@@ -920,7 +920,11 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
	struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
	int nr;

-	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
+	/*
+	 * Align the MR to a 4K page size to match the ctrl page size and
+	 * the block virtual boundary.
+	 */
+	nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
	if (nr < count) {
		if (nr < 0)
			return nr;
@@ -1583,7 +1587,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
-		(ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+		(ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
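
Both rdma.c hunks pin the memory region page size to 4k (SZ_4K) instead of PAGE_SIZE. On architectures built with larger pages (for example 64k pages on some arm64 and POWER configurations), PAGE_SIZE no longer matches the controller page size or the block layer's 4k virtual boundary, so the ib_map_mr_sg() granularity and the max_hw_sectors calculation both have to use 4k. A worked example of the transfer-size arithmetic, using a hypothetical max_fr_pages value:

#include <stdio.h>

#define SZ_4K		4096
#define SECTOR_SHIFT	9	/* 512-byte sectors */

int main(void)
{
	unsigned int max_fr_pages = 256;	/* hypothetical fast-reg MR limit */

	/*
	 * ilog2(SZ_4K) == 12, so each 4k page holds 1 << (12 - 9) = 8
	 * sectors; the "- 1" allows for an unaligned first page.
	 */
	unsigned int max_hw_sectors = (max_fr_pages - 1) << (12 - SECTOR_SHIFT);

	printf("%u\n", max_hw_sectors);	/* 255 * 8 = 2040 sectors (~1020 KiB) */
	return 0;
}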
include/linux/nvme.h +1 −1
@@ -254,7 +254,7 @@ enum {
	NVME_CTRL_VWC_PRESENT			= 1 << 0,
	NVME_CTRL_OACS_SEC_SUPP                 = 1 << 0,
	NVME_CTRL_OACS_DIRECTIVES		= 1 << 5,
-	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 7,
+	NVME_CTRL_OACS_DBBUF_SUPP		= 1 << 8,
};

struct nvme_lbaf {
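
The header fix moves NVME_CTRL_OACS_DBBUF_SUPP from bit 7 to bit 8 of the Optional Admin Command Support (OACS) field, where NVMe 1.3 defines the Doorbell Buffer Config capability (bit 7 is Virtualization Management). A simplified, hypothetical sketch of how a driver consumes the flag; with the old 1 << 7 definition this test would have read the wrong capability bit:

#include <stdbool.h>
#include <stdint.h>

#define NVME_CTRL_OACS_DBBUF_SUPP	(1 << 8)	/* Doorbell Buffer Config */

/* Hypothetical helper: decide whether to set up the doorbell buffer.
 * oacs is the Identify Controller OACS field, already byte-swapped to
 * host order. */
bool ctrl_supports_dbbuf(uint16_t oacs)
{
	return oacs & NVME_CTRL_OACS_DBBUF_SUPP;
}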