Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 25b8e08e authored by Matthew R. Ochs, committed by Martin K. Petersen
Browse files

scsi: cxlflash: Staging to support future accelerators



As staging to support future accelerator transports, add a shim layer
such that the underlying services the cxlflash driver requires can be
conditional upon the accelerator infrastructure.

Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 0df69c60
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o cxl_hw.o
+41 −0
Original line number Diff line number Diff line
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_BACKEND_H
#define _CXLFLASH_BACKEND_H

extern const struct cxlflash_backend_ops cxlflash_cxl_ops;

/*
 * Operations vector abstracting the accelerator transport beneath the
 * cxlflash driver.  Contexts and AFUs are passed around as opaque void *
 * "cookies" so that alternate transports can supply their own internal
 * representations behind the same interface.
 */
struct cxlflash_backend_ops {
	struct module *module;	/* owning module, for reference counting */
	void __iomem * (*psa_map)(void *);
	void (*psa_unmap)(void __iomem *);
	int (*process_element)(void *);
	int (*map_afu_irq)(void *, int, irq_handler_t, void *, char *);
	void (*unmap_afu_irq)(void *, int, void *);
	int (*start_context)(void *);
	int (*stop_context)(void *);
	int (*afu_reset)(void *);
	void (*set_master)(void *);
	void * (*get_context)(struct pci_dev *, void *);
	void * (*dev_context_init)(struct pci_dev *, void *);
	int (*release_context)(void *);
	void (*perst_reloads_same_image)(void *, bool);
	ssize_t (*read_adapter_vpd)(struct pci_dev *, void *, size_t);
	int (*allocate_afu_irqs)(void *, int);
	void (*free_afu_irqs)(void *);
	void * (*create_afu)(struct pci_dev *);
	struct file * (*get_fd)(void *, struct file_operations *, int *);
	void * (*fops_get_context)(struct file *);
	int (*start_work)(void *, u64);
	int (*fd_mmap)(struct file *, struct vm_area_struct *);
	int (*fd_release)(struct inode *, struct file *);
};

#endif /* _CXLFLASH_BACKEND_H */
+3 −0
Original line number Diff line number Diff line
@@ -25,6 +25,8 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "backend.h"

extern const struct file_operations cxlflash_cxl_fops;

#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */
@@ -114,6 +116,7 @@ enum cxlflash_hwq_mode {
struct cxlflash_cfg {
	struct afu *afu;

	const struct cxlflash_backend_ops *ops;
	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;
+168 −0
Original line number Diff line number Diff line
/*
 * CXL Flash Device Driver
 *
 * Written by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *             Uma Krishnan <ukrishn@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2018 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <misc/cxl.h>

#include "backend.h"

/*
 * The following routines map the cxlflash backend operations to existing CXL
 * kernel API function and are largely simple shims that provide an abstraction
 * for converting generic context and AFU cookies into cxl_context or cxl_afu
 * pointers.
 */

/* Map the context's PSA (problem state area) MMIO; shim over cxl_psa_map(). */
static void __iomem *cxlflash_psa_map(void *ctx_cookie)
{
	return cxl_psa_map(ctx_cookie);
}

/* Unmap a PSA mapping obtained via the psa_map op; shim over cxl_psa_unmap(). */
static void cxlflash_psa_unmap(void __iomem *addr)
{
	cxl_psa_unmap(addr);
}

/* Return the process element (context handle) for a context cookie. */
static int cxlflash_process_element(void *ctx_cookie)
{
	return cxl_process_element(ctx_cookie);
}

/* Attach @handler to AFU interrupt @num of a context; shim over cxl_map_afu_irq(). */
static int cxlflash_map_afu_irq(void *ctx_cookie, int num,
				irq_handler_t handler, void *cookie, char *name)
{
	return cxl_map_afu_irq(ctx_cookie, num, handler, cookie, name);
}

/* Detach the handler previously mapped to AFU interrupt @num. */
static void cxlflash_unmap_afu_irq(void *ctx_cookie, int num, void *cookie)
{
	cxl_unmap_afu_irq(ctx_cookie, num, cookie);
}

/*
 * Start a kernel context; no work element descriptor (wed = 0) and no
 * associated task (NULL) are supplied to cxl_start_context().
 */
static int cxlflash_start_context(void *ctx_cookie)
{
	return cxl_start_context(ctx_cookie, 0, NULL);
}

/* Stop a running context; shim over cxl_stop_context(). */
static int cxlflash_stop_context(void *ctx_cookie)
{
	return cxl_stop_context(ctx_cookie);
}

/* Reset the AFU associated with this context; shim over cxl_afu_reset(). */
static int cxlflash_afu_reset(void *ctx_cookie)
{
	return cxl_afu_reset(ctx_cookie);
}

/* Mark the context as the master context; shim over cxl_set_master(). */
static void cxlflash_set_master(void *ctx_cookie)
{
	cxl_set_master(ctx_cookie);
}

/*
 * Obtain the default (PCI-function) context for @dev.  The afu_cookie
 * parameter exists only for backend interface uniformity and is unused
 * by the CXL transport.
 */
static void *cxlflash_get_context(struct pci_dev *dev, void *afu_cookie)
{
	return cxl_get_context(dev);
}

/*
 * Allocate and initialize a fresh context for @dev.  The afu_cookie
 * parameter is unused by the CXL transport (interface uniformity only).
 */
static void *cxlflash_dev_context_init(struct pci_dev *dev, void *afu_cookie)
{
	return cxl_dev_context_init(dev);
}

/* Release a context obtained via get_context/dev_context_init. */
static int cxlflash_release_context(void *ctx_cookie)
{
	return cxl_release_context(ctx_cookie);
}

/* Indicate whether a PERST reloads the same AFU image; shim over CXL API. */
static void cxlflash_perst_reloads_same_image(void *afu_cookie, bool image)
{
	cxl_perst_reloads_same_image(afu_cookie, image);
}

/* Read up to @count bytes of adapter VPD into @buf; returns bytes read or error. */
static ssize_t cxlflash_read_adapter_vpd(struct pci_dev *dev,
					 void *buf, size_t count)
{
	return cxl_read_adapter_vpd(dev, buf, count);
}

/* Allocate @num AFU interrupts for a context; shim over cxl_allocate_afu_irqs(). */
static int cxlflash_allocate_afu_irqs(void *ctx_cookie, int num)
{
	return cxl_allocate_afu_irqs(ctx_cookie, num);
}

/* Free the AFU interrupts previously allocated for a context. */
static void cxlflash_free_afu_irqs(void *ctx_cookie)
{
	cxl_free_afu_irqs(ctx_cookie);
}

/*
 * "Create" the AFU cookie for @dev.  For CXL this is simply a lookup of
 * the existing AFU behind the PCI function (cxl_pci_to_afu()), not an
 * allocation; nothing needs to be torn down on the release side.
 */
static void *cxlflash_create_afu(struct pci_dev *dev)
{
	return cxl_pci_to_afu(dev);
}

/*
 * Obtain a file descriptor (returned through @fd) and its struct file
 * for a context, installing @fops; shim over cxl_get_fd().
 */
static struct file *cxlflash_get_fd(void *ctx_cookie,
				    struct file_operations *fops, int *fd)
{
	return cxl_get_fd(ctx_cookie, fops, fd);
}

/* Recover the context cookie backing @file; shim over cxl_fops_get_context(). */
static void *cxlflash_fops_get_context(struct file *file)
{
	return cxl_fops_get_context(file);
}

/*
 * Start a context with @irqs interrupts.  Builds the minimal
 * cxl_ioctl_start_work descriptor (interrupt count only, flagged via
 * CXL_START_WORK_NUM_IRQS) and hands it to cxl_start_work().
 */
static int cxlflash_start_work(void *ctx_cookie, u64 irqs)
{
	struct cxl_ioctl_start_work work = {
		.num_interrupts	= irqs,
		.flags		= CXL_START_WORK_NUM_IRQS,
	};

	return cxl_start_work(ctx_cookie, &work);
}

/* mmap handler for a context fd; shim over cxl_fd_mmap(). */
static int cxlflash_fd_mmap(struct file *file, struct vm_area_struct *vm)
{
	return cxl_fd_mmap(file, vm);
}

/* release handler for a context fd; shim over cxl_fd_release(). */
static int cxlflash_fd_release(struct inode *inode, struct file *file)
{
	return cxl_fd_release(inode, file);
}

/*
 * Backend operations vector for the CXL transport.  Every entry is one
 * of the thin shims above, mapping the generic cxlflash backend
 * interface onto the CXL kernel API.
 */
const struct cxlflash_backend_ops cxlflash_cxl_ops = {
	.module			= THIS_MODULE,
	.psa_map		= cxlflash_psa_map,
	.psa_unmap		= cxlflash_psa_unmap,
	.process_element	= cxlflash_process_element,
	.map_afu_irq		= cxlflash_map_afu_irq,
	.unmap_afu_irq		= cxlflash_unmap_afu_irq,
	.start_context		= cxlflash_start_context,
	.stop_context		= cxlflash_stop_context,
	.afu_reset		= cxlflash_afu_reset,
	.set_master		= cxlflash_set_master,
	.get_context		= cxlflash_get_context,
	.dev_context_init	= cxlflash_dev_context_init,
	.release_context	= cxlflash_release_context,
	.perst_reloads_same_image = cxlflash_perst_reloads_same_image,
	.read_adapter_vpd	= cxlflash_read_adapter_vpd,
	.allocate_afu_irqs	= cxlflash_allocate_afu_irqs,
	.free_afu_irqs		= cxlflash_free_afu_irqs,
	.create_afu		= cxlflash_create_afu,
	.get_fd			= cxlflash_get_fd,
	.fops_get_context	= cxlflash_fops_get_context,
	.start_work		= cxlflash_start_work,
	.fd_mmap		= cxlflash_fd_mmap,
	.fd_release		= cxlflash_fd_release,
};
+30 −49
Original line number Diff line number Diff line
@@ -711,7 +711,7 @@ static void stop_afu(struct cxlflash_cfg *cfg)
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
@@ -748,13 +748,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cxl_unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
	case FREE_IRQ:
		cxl_free_afu_irqs(hwq->ctx_cookie);
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
@@ -788,9 +788,9 @@ static void term_mc(struct cxlflash_cfg *cfg, u32 index)
		return;
	}

	WARN_ON(cxl_stop_context(hwq->ctx_cookie));
	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cxl_release_context(hwq->ctx_cookie));
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
@@ -1598,25 +1598,6 @@ static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg, u32 index)
{
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(cfg->afu, index);
	int rc = 0;

	rc = cxl_start_context(hwq->ctx_cookie, 0, NULL);

	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
@@ -1639,7 +1620,7 @@ static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
			__func__, vpd_size);
@@ -1731,6 +1712,7 @@ static void init_pcr(struct cxlflash_cfg *cfg)
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	struct hwq *hwq;
	void *cookie;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
@@ -1745,8 +1727,9 @@ static void init_pcr(struct cxlflash_cfg *cfg)
	/* Copy frequently used fields into hwq */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
		cookie = hwq->ctx_cookie;

		hwq->ctx_hndl = (u16) cxl_process_element(hwq->ctx_cookie);
		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;

@@ -1930,7 +1913,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
	int num_irqs = is_primary_hwq ? 3 : 2;

	rc = cxl_allocate_afu_irqs(ctx, num_irqs);
	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
	if (unlikely(rc)) {
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
@@ -1938,7 +1921,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
@@ -1946,7 +1929,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
@@ -1958,7 +1941,7 @@ static enum undo_level init_intr(struct cxlflash_cfg *cfg,
	if (!is_primary_hwq)
		goto out;

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
@@ -1989,9 +1972,9 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
	INIT_LIST_HEAD(&hwq->pending_cmds);

	if (index == PRIMARY_HWQ)
		ctx = cxl_get_context(cfg->dev);
		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
	else
		ctx = cxl_dev_context_init(cfg->dev);
		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
	if (IS_ERR_OR_NULL(ctx)) {
		rc = -ENOMEM;
		goto err1;
@@ -2001,11 +1984,11 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
	hwq->ctx_cookie = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);
	cfg->ops->set_master(ctx);

	/* Reset AFU when initializing primary context */
	if (index == PRIMARY_HWQ) {
		rc = cxl_afu_reset(ctx);
		rc = cfg->ops->afu_reset(ctx);
		if (unlikely(rc)) {
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
				      __func__, rc);
@@ -2019,11 +2002,8 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
		goto err2;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg, index);
	/* Finally, activate the context by starting it */
	rc = cfg->ops->start_context(hwq->ctx_cookie);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
@@ -2036,7 +2016,7 @@ static int init_mc(struct cxlflash_cfg *cfg, u32 index)
err2:
	term_intr(cfg, level, index);
	if (index != PRIMARY_HWQ)
		cxl_release_context(ctx);
		cfg->ops->release_context(ctx);
err1:
	hwq->ctx_cookie = NULL;
	goto out;
@@ -2093,7 +2073,7 @@ static int init_afu(struct cxlflash_cfg *cfg)
	struct hwq *hwq;
	int i;

	cxl_perst_reloads_same_image(cfg->afu_cookie, true);
	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);

	afu->num_hwqs = afu->desired_hwqs;
	for (i = 0; i < afu->num_hwqs; i++) {
@@ -2107,9 +2087,9 @@ static int init_afu(struct cxlflash_cfg *cfg)

	/* Map the entire MMIO space of the AFU using the first context */
	hwq = get_hwq(afu, PRIMARY_HWQ);
	afu->afu_map = cxl_psa_map(hwq->ctx_cookie);
	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
	if (!afu->afu_map) {
		dev_err(dev, "%s: cxl_psa_map failed\n", __func__);
		dev_err(dev, "%s: psa_map failed\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
@@ -3669,6 +3649,7 @@ static int cxlflash_probe(struct pci_dev *pdev,

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->ops = &cxlflash_cxl_ops;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
@@ -3700,7 +3681,7 @@ static int cxlflash_probe(struct pci_dev *pdev,

	pci_set_drvdata(pdev, cfg);

	cfg->afu_cookie = cxl_pci_to_afu(pdev);
	cfg->afu_cookie = cfg->ops->create_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
Loading