
Commit 43baed34 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull more block layer patches from Jens Axboe:
 "A few later arrivers that I didn't fold into the first pull request,
  so we had a chance to run some testing.  This contains:

   - NVMe:
        - Set of fixes from Keith
        - 4.4 and earlier gcc build fix from Andrew

   - small set of xen-blk{back,front} fixes from Bob Liu.

   - warnings fix for bogus inline statement in I_BDEV() from Geert.

   - error code fixup for SG_IO ioctl from Paolo Bonzini"

* 'for-linus' of git://git.kernel.dk/linux-block:
  drivers/block/nvme-core.c: fix build with gcc-4.4.4
  bdi: Remove "inline" keyword from exported I_BDEV() implementation
  block: fix bogus EFAULT error from SG_IO ioctl
  NVMe: Fix filesystem deadlock on removal
  NVMe: Failed controller initialization fixes
  NVMe: Unify controller probe and resume
  NVMe: Don't use fake status on cancelled command
  NVMe: Fix device cleanup on initialization failure
  drivers: xen-blkfront: only talk_to_blkback() when in XenbusStateInitialising
  xen/block: add multi-page ring support
  driver: xen-blkfront: move talk_to_blkback to a more suitable place
  drivers: xen-blkback: delay pending_req allocation to connect_ring
parents 6aaf0da8 e44ac588

block/scsi_ioctl.c  +2 −2

@@ -326,8 +326,8 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 			goto out_put_request;
 	}
 
-	ret = -EFAULT;
-	if (blk_fill_sghdr_rq(q, rq, hdr, mode))
+	ret = blk_fill_sghdr_rq(q, rq, hdr, mode);
+	if (ret < 0)
 		goto out_free_cdb;
 
 	ret = 0;
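
The change above stops sg_io() from overwriting the helper's return code with a blanket -EFAULT: whatever blk_fill_sghdr_rq() reports is now passed straight back to the SG_IO caller. A minimal, self-contained sketch of that error-propagation pattern (plain user-space C; fill_request() is a hypothetical stand-in, not a kernel API):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical stand-in for blk_fill_sghdr_rq(): returns 0 on success
	 * or a negative errno describing the actual failure. */
	static int fill_request(int bad_header)
	{
		return bad_header ? -EINVAL : 0;
	}

	static int submit_io(int bad_header)
	{
		int ret;

		ret = fill_request(bad_header);
		if (ret < 0)
			return ret;	/* caller sees -EINVAL, not a misleading -EFAULT */

		return 0;
	}

	int main(void)
	{
		printf("%d\n", submit_io(1));	/* prints -22 (EINVAL) */
		return 0;
	}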

drivers/block/nvme-core.c  +80 −58

@@ -193,6 +193,13 @@ static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 	return 0;
 }
 
+static void nvme_admin_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
+{
+	struct nvme_queue *nvmeq = hctx->driver_data;
+
+	nvmeq->tags = NULL;
+}
+
 static int nvme_admin_init_request(void *data, struct request *req,
 				unsigned int hctx_idx, unsigned int rq_idx,
 				unsigned int numa_node)
@@ -606,6 +613,9 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
 			return;
 		}
 		if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
-			req->errors = status;
+			if (cmd_rq->ctx == CMD_CTX_CANCELLED)
+				req->errors = -EINTR;
+			else
+				req->errors = status;
 		} else {
 			req->errors = nvme_error_status(status);
@@ -1161,12 +1171,13 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
 
 int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
 {
-	struct nvme_command c = {
-		.identify.opcode = nvme_admin_identify,
-		.identify.cns = cpu_to_le32(1),
-	};
+	struct nvme_command c = { };
 	int error;
 
+	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+	c.identify.opcode = nvme_admin_identify;
+	c.identify.cns = cpu_to_le32(1);
+
 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
 	if (!*id)
 		return -ENOMEM;
@@ -1181,12 +1192,13 @@ int nvme_identify_ctrl(struct nvme_dev *dev, struct nvme_id_ctrl **id)
 int nvme_identify_ns(struct nvme_dev *dev, unsigned nsid,
 		struct nvme_id_ns **id)
 {
-	struct nvme_command c = {
-		.identify.opcode = nvme_admin_identify,
-		.identify.nsid = cpu_to_le32(nsid),
-	};
+	struct nvme_command c = { };
 	int error;
 
+	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
+	c.identify.opcode = nvme_admin_identify,
+	c.identify.nsid = cpu_to_le32(nsid),
+
 	*id = kmalloc(sizeof(struct nvme_id_ns), GFP_KERNEL);
 	if (!*id)
 		return -ENOMEM;
@@ -1230,14 +1242,14 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
 
 int nvme_get_log_page(struct nvme_dev *dev, struct nvme_smart_log **log)
 {
-	struct nvme_command c = {
-		.common.opcode = nvme_admin_get_log_page,
-		.common.nsid = cpu_to_le32(0xFFFFFFFF),
-		.common.cdw10[0] = cpu_to_le32(
+	struct nvme_command c = { };
+	int error;
+
+	c.common.opcode = nvme_admin_get_log_page,
+	c.common.nsid = cpu_to_le32(0xFFFFFFFF),
+	c.common.cdw10[0] = cpu_to_le32(
 			(((sizeof(struct nvme_smart_log) / 4) - 1) << 16) |
 			 NVME_LOG_SMART),
-	};
-	int error;
 
 	*log = kmalloc(sizeof(struct nvme_smart_log), GFP_KERNEL);
 	if (!*log)
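
The "gcc-4.4.4" comments in the identify hunks above refer to old gcc releases choking on designated initializers that reach into an anonymous union (struct nvme_command wraps the command formats in exactly such a union). A hedged, standalone sketch of the failing form and the workaround the patch uses (struct cmd is illustrative, not the real struct nvme_command):

	/* Simplified stand-in for a command structure with an anonymous union. */
	struct cmd {
		union {
			struct {
				unsigned char opcode;
				unsigned int  cns;
			} identify;
			unsigned char raw[64];
		};
	};

	int main(void)
	{
		/*
		 * gcc 4.4.x rejects the designated-initializer form:
		 *
		 *	struct cmd c = { .identify.opcode = 6, .identify.cns = 1 };
		 *
		 * Workaround from the patch: zero-initialize the whole object,
		 * then assign the union members as ordinary statements.
		 */
		struct cmd c = { };

		c.identify.opcode = 6;
		c.identify.cns = 1;

		return c.identify.opcode == 6 ? 0 : 1;
	}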
@@ -1606,6 +1618,7 @@ static struct blk_mq_ops nvme_mq_admin_ops = {
 	.queue_rq	= nvme_queue_rq,
 	.map_queue	= blk_mq_map_queue,
 	.init_hctx	= nvme_admin_init_hctx,
+	.exit_hctx      = nvme_admin_exit_hctx,
 	.init_request	= nvme_admin_init_request,
 	.timeout	= nvme_timeout,
 };
@@ -1648,6 +1661,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 		}
 		if (!blk_get_queue(dev->admin_q)) {
 			nvme_dev_remove_admin(dev);
+			dev->admin_q = NULL;
 			return -ENODEV;
 		}
 	} else
@@ -2349,6 +2363,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	}
 	kfree(ctrl);
 
+	if (!dev->tagset.tags) {
 		dev->tagset.ops = &nvme_mq_ops;
 		dev->tagset.nr_hw_queues = dev->online_queues - 1;
 		dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -2361,7 +2376,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 		if (blk_mq_alloc_tag_set(&dev->tagset))
 			return 0;
-
+	}
 	schedule_work(&dev->scan_work);
 	return 0;
 }
@@ -2734,7 +2749,9 @@ static void nvme_free_dev(struct kref *kref)
 	put_device(dev->device);
 	nvme_free_namespaces(dev);
 	nvme_release_instance(dev);
+	if (dev->tagset.tags)
 		blk_mq_free_tag_set(&dev->tagset);
+	if (dev->admin_q)
 		blk_put_queue(dev->admin_q);
 	kfree(dev->queues);
 	kfree(dev->entry);
@@ -2866,6 +2883,9 @@ static int nvme_dev_start(struct nvme_dev *dev)
 
  free_tags:
 	nvme_dev_remove_admin(dev);
+	blk_put_queue(dev->admin_q);
+	dev->admin_q = NULL;
+	dev->queues[0]->tags = NULL;
  disable:
 	nvme_disable_queue(dev, 0);
 	nvme_dev_list_remove(dev);
@@ -2907,16 +2927,14 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 		spin_unlock(&dev_list_lock);
 	} else {
 		nvme_unfreeze_queues(dev);
-		schedule_work(&dev->scan_work);
+		nvme_dev_add(dev);
 		nvme_set_irq_hints(dev);
 	}
 	return 0;
 }
 
-static void nvme_dev_reset(struct nvme_dev *dev)
+static void nvme_dead_ctrl(struct nvme_dev *dev)
 {
-	nvme_dev_shutdown(dev);
-	if (nvme_dev_resume(dev)) {
 	dev_warn(dev->dev, "Device failed to resume\n");
 	kref_get(&dev->kref);
 	if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
@@ -2926,6 +2944,26 @@ static void nvme_dev_reset(struct nvme_dev *dev)
 		kref_put(&dev->kref, nvme_free_dev);
 	}
 }
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+	bool in_probe = work_busy(&dev->probe_work);
+
+	nvme_dev_shutdown(dev);
+
+	/* Synchronize with device probe so that work will see failure status
+	 * and exit gracefully without trying to schedule another reset */
+	flush_work(&dev->probe_work);
+
+	/* Fail this device if reset occured during probe to avoid
+	 * infinite initialization loops. */
+	if (in_probe) {
+		nvme_dead_ctrl(dev);
+		return;
+	}
+	/* Schedule device resume asynchronously so the reset work is available
+	 * to cleanup errors that may occur during reinitialization */
+	schedule_work(&dev->probe_work);
 }
 
 static void nvme_reset_failed_dev(struct work_struct *ws)
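
The nvme_dev_reset() rewrite above is essentially a workqueue-ordering fix: remember whether a probe was still in flight before tearing the controller down, flush that work so it observes the failure, and only then choose between declaring the controller dead and rescheduling probe_work. A hedged kernel-style sketch of that pattern (my_dev, hw_shutdown() and mark_dead() are illustrative stand-ins; work_busy(), flush_work() and schedule_work() are the real workqueue APIs):

	#include <linux/types.h>
	#include <linux/workqueue.h>

	struct my_dev {
		struct work_struct probe_work;
	};

	static void hw_shutdown(struct my_dev *dev) { /* illustrative */ }
	static void mark_dead(struct my_dev *dev)   { /* illustrative */ }

	static void my_dev_reset(struct my_dev *dev)
	{
		/* Snapshot "was probe still running?" before the teardown. */
		bool in_probe = work_busy(&dev->probe_work);

		hw_shutdown(dev);		/* make the in-flight probe fail fast */
		flush_work(&dev->probe_work);	/* wait until probe has seen the failure */

		if (in_probe) {
			mark_dead(dev);		/* reset hit during probe: don't loop forever */
			return;
		}
		schedule_work(&dev->probe_work);	/* otherwise re-run initialization */
	}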
@@ -2957,6 +2995,7 @@ static int nvme_reset(struct nvme_dev *dev)
 
 	if (!ret) {
 		flush_work(&dev->reset_work);
+		flush_work(&dev->probe_work);
 		return 0;
 	}
 
@@ -3053,26 +3092,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void nvme_async_probe(struct work_struct *work)
 {
 	struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
-	int result;
-
-	result = nvme_dev_start(dev);
-	if (result)
-		goto reset;
 
-	if (dev->online_queues > 1)
-		result = nvme_dev_add(dev);
-	if (result)
-		goto reset;
-
-	nvme_set_irq_hints(dev);
-	return;
- reset:
-	spin_lock(&dev_list_lock);
-	if (!work_busy(&dev->reset_work)) {
-		dev->reset_workfn = nvme_reset_failed_dev;
-		queue_work(nvme_workq, &dev->reset_work);
-	}
-	spin_unlock(&dev_list_lock);
+	if (nvme_dev_resume(dev) && !work_busy(&dev->reset_work))
+		nvme_dead_ctrl(dev);
 }
 
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
@@ -3104,8 +3126,8 @@ static void nvme_remove(struct pci_dev *pdev)
 	flush_work(&dev->reset_work);
 	flush_work(&dev->scan_work);
 	device_remove_file(dev->device, &dev_attr_reset_controller);
-	nvme_dev_shutdown(dev);
 	nvme_dev_remove(dev);
+	nvme_dev_shutdown(dev);
 	nvme_dev_remove_admin(dev);
 	device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
 	nvme_free_queues(dev, 0);

drivers/block/xen-blkback/blkback.c  +13 −0

@@ -83,6 +83,13 @@ module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
 MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
+/*
+ * Maximum order of pages to be used for the shared ring between front and
+ * backend, 4KB page granularity is used.
+ */
+unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
+MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 /*
  * The LRU mechanism to clean the lists of persistent grants needs to
  * be executed periodically. The time interval between consecutive executions
@@ -1438,6 +1445,12 @@ static int __init xen_blkif_init(void)
 	if (!xen_domain())
 		return -ENODEV;
 
+	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_PAGE_ORDER) {
+		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
+			xen_blkif_max_ring_order, XENBUS_MAX_RING_PAGE_ORDER);
+		xen_blkif_max_ring_order = XENBUS_MAX_RING_PAGE_ORDER;
+	}
+
 	rc = xen_blkif_interface_init();
 	if (rc)
 		goto failed_init;

drivers/block/xen-blkback/common.h  +3 −1

@@ -44,6 +44,7 @@
 #include <xen/interface/io/blkif.h>
 #include <xen/interface/io/protocols.h>
 
+extern unsigned int xen_blkif_max_ring_order;
 /*
  * This is the maximum number of segments that would be allowed in indirect
  * requests. This value will also be passed to the frontend.
@@ -248,7 +249,7 @@ struct backend_info;
 #define PERSISTENT_GNT_WAS_ACTIVE	1
 
 /* Number of requests that we can fit in a ring */
-#define XEN_BLKIF_REQS			32
+#define XEN_BLKIF_REQS_PER_PAGE		32
 
 struct persistent_gnt {
 	struct page *page;
@@ -320,6 +321,7 @@ struct xen_blkif {
 	struct work_struct	free_work;
 	/* Thread shutdown wait queue. */
 	wait_queue_head_t	shutdown_wq;
+	unsigned int nr_ring_pages;
 };
 
 struct seg_buf {

drivers/block/xen-blkback/xenbus.c  +105 −62

@@ -25,6 +25,7 @@
 
 /* Enlarge the array size in order to fully show blkback name. */
 #define BLKBACK_NAME_LEN (20)
+#define RINGREF_NAME_LEN (20)
 
 struct backend_info {
 	struct xenbus_device	*dev;
@@ -124,8 +125,6 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
 static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 {
 	struct xen_blkif *blkif;
-	struct pending_req *req, *n;
-	int i, j;
 
 	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
 
@@ -151,55 +150,15 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
 
 	INIT_LIST_HEAD(&blkif->pending_free);
 	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);
-
-	for (i = 0; i < XEN_BLKIF_REQS; i++) {
-		req = kzalloc(sizeof(*req), GFP_KERNEL);
-		if (!req)
-			goto fail;
-		list_add_tail(&req->free_list,
-		              &blkif->pending_free);
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
-			                           GFP_KERNEL);
-			if (!req->segments[j])
-				goto fail;
-		}
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
-			                                 GFP_KERNEL);
-			if (!req->indirect_pages[j])
-				goto fail;
-		}
-	}
 	spin_lock_init(&blkif->pending_free_lock);
 	init_waitqueue_head(&blkif->pending_free_wq);
 	init_waitqueue_head(&blkif->shutdown_wq);
 
 	return blkif;
-
-fail:
-	list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
-		list_del(&req->free_list);
-		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
-			if (!req->segments[j])
-				break;
-			kfree(req->segments[j]);
-		}
-		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
-			if (!req->indirect_pages[j])
-				break;
-			kfree(req->indirect_pages[j]);
-		}
-		kfree(req);
-	}
-
-	kmem_cache_free(xen_blkif_cachep, blkif);
-
-	return ERR_PTR(-ENOMEM);
 }
 
-static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
-			 unsigned int evtchn)
+static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
+			 unsigned int nr_grefs, unsigned int evtchn)
 {
 	int err;
 
@@ -207,7 +166,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
 	if (blkif->irq)
 		return 0;
 
-	err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1,
+	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
 				     &blkif->blk_ring);
 	if (err < 0)
 		return err;
@@ -217,21 +176,21 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
 	{
 		struct blkif_sring *sring;
 		sring = (struct blkif_sring *)blkif->blk_ring;
-		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE * nr_grefs);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_32:
 	{
 		struct blkif_x86_32_sring *sring_x86_32;
 		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
-		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE * nr_grefs);
 		break;
 	}
 	case BLKIF_PROTOCOL_X86_64:
 	{
 		struct blkif_x86_64_sring *sring_x86_64;
 		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
-		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE * nr_grefs);
 		break;
 	}
 	default:
@@ -312,7 +271,7 @@ static void xen_blkif_free(struct xen_blkif *blkif)
 		i++;
 	}
 
-	WARN_ON(i != XEN_BLKIF_REQS);
+	WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
 
 	kmem_cache_free(xen_blkif_cachep, blkif);
 }
@@ -597,6 +556,11 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
 	if (err)
 		goto fail;
 
+	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
+			    xen_blkif_max_ring_order);
+	if (err)
+		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);
+
 	err = xenbus_switch_state(dev, XenbusStateInitWait);
 	if (err)
 		goto fail;
@@ -860,22 +824,66 @@ static void connect(struct backend_info *be)
 static int connect_ring(struct backend_info *be)
 {
 	struct xenbus_device *dev = be->dev;
-	unsigned long ring_ref;
-	unsigned int evtchn;
+	unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
+	unsigned int evtchn, nr_grefs, ring_page_order;
 	unsigned int pers_grants;
 	char protocol[64] = "";
-	int err;
+	struct pending_req *req, *n;
+	int err, i, j;
 
 	pr_debug("%s %s\n", __func__, dev->otherend);
 
-	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
-			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
-	if (err) {
-		xenbus_dev_fatal(dev, err,
-				 "reading %s/ring-ref and event-channel",
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "event-channel", "%u",
+			  &evtchn);
+	if (err != 1) {
+		err = -EINVAL;
+		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
 				 dev->otherend);
 		return err;
 	}
+	pr_info("event-channel %u\n", evtchn);
+
+	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
+			  &ring_page_order);
+	if (err != 1) {
+		err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
+				  "%u", &ring_ref[0]);
+		if (err != 1) {
+			err = -EINVAL;
+			xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
+					 dev->otherend);
+			return err;
+		}
+		nr_grefs = 1;
+		pr_info("%s:using single page: ring-ref %d\n", dev->otherend,
+			ring_ref[0]);
+	} else {
+		unsigned int i;
+
+		if (ring_page_order > xen_blkif_max_ring_order) {
+			err = -EINVAL;
+			xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d",
+					 dev->otherend, ring_page_order,
+					 xen_blkif_max_ring_order);
+			return err;
+		}
+
+		nr_grefs = 1 << ring_page_order;
+		for (i = 0; i < nr_grefs; i++) {
+			char ring_ref_name[RINGREF_NAME_LEN];
+
+			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+			err = xenbus_scanf(XBT_NIL, dev->otherend, ring_ref_name,
+					   "%u", &ring_ref[i]);
+			if (err != 1) {
+				err = -EINVAL;
+				xenbus_dev_fatal(dev, err, "reading %s/%s",
+						 dev->otherend, ring_ref_name);
+				return err;
+			}
+			pr_info("ring-ref%u: %u\n", i, ring_ref[i]);
+		}
+	}
 
 	be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
 	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
@@ -900,20 +908,55 @@ static int connect_ring(struct backend_info *be)
 
 	be->blkif->vbd.feature_gnt_persistent = pers_grants;
 	be->blkif->vbd.overflow_max_grants = 0;
+	be->blkif->nr_ring_pages = nr_grefs;
 
-	pr_info("ring-ref %ld, event-channel %d, protocol %d (%s) %s\n",
-		ring_ref, evtchn, be->blkif->blk_protocol, protocol,
+	pr_info("ring-pages:%d, event-channel %d, protocol %d (%s) %s\n",
+		nr_grefs, evtchn, be->blkif->blk_protocol, protocol,
 		pers_grants ? "persistent grants" : "");
 
+	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req)
+			goto fail;
+		list_add_tail(&req->free_list, &be->blkif->pending_free);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
+			if (!req->segments[j])
+				goto fail;
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
+							 GFP_KERNEL);
+			if (!req->indirect_pages[j])
+				goto fail;
+		}
+	}
+
 	/* Map the shared frame, irq etc. */
-	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
+	err = xen_blkif_map(be->blkif, ring_ref, nr_grefs, evtchn);
 	if (err) {
-		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
-				 ring_ref, evtchn);
+		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
 		return err;
 	}
 
 	return 0;
+
+fail:
+	list_for_each_entry_safe(req, n, &be->blkif->pending_free, free_list) {
+		list_del(&req->free_list);
+		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
+			if (!req->segments[j])
+				break;
+			kfree(req->segments[j]);
+		}
+		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
+			if (!req->indirect_pages[j])
+				break;
+			kfree(req->indirect_pages[j]);
+		}
+		kfree(req);
+	}
+	return -ENOMEM;
 }
 
 static const struct xenbus_device_id xen_blkbk_ids[] = {
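
Taken together, the xen-blkback changes negotiate a multi-page shared ring through xenstore: the backend advertises its limit in max-ring-page-order (backed by the new max_ring_page_order module parameter), the frontend replies with ring-page-order plus one ring-ref%u key per page, and connect_ring() sizes both the mapped ring and the pending_req pool from that. A small, hedged user-space sketch of the resulting arithmetic (the constants mirror the patch; ring_geometry() is illustrative only):

	#include <stdio.h>

	#define PAGE_SIZE		4096	/* 4KB ring granularity, per the patch */
	#define XEN_BLKIF_REQS_PER_PAGE	32	/* from xen-blkback/common.h above */

	/* For a negotiated ring-page-order, print the geometry that
	 * connect_ring()/xen_blkif_map() end up using. */
	static void ring_geometry(unsigned int ring_page_order)
	{
		unsigned int nr_grefs = 1u << ring_page_order;	/* ring-ref%u entries to read */

		printf("order %u: %u page(s), %u-byte shared ring, %u pending requests\n",
		       ring_page_order, nr_grefs, nr_grefs * PAGE_SIZE,
		       nr_grefs * XEN_BLKIF_REQS_PER_PAGE);
	}

	int main(void)
	{
		ring_geometry(0);	/* legacy single-page ring: 4096 bytes, 32 requests */
		ring_geometry(2);	/* 4 pages: 16384 bytes, 128 requests */
		return 0;
	}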