Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d7927df8 authored by Martin Schwidefsky's avatar Martin Schwidefsky
Browse files

Merge tag 'vfio-ccw-20180529' of...

Merge tag 'vfio-ccw-20180529' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/vfio-ccw into features

Pull vfio-ccw from Cornelia Huck with the following changes:

 - Various fixes and improvements in vfio-ccw, including a first stab
   at adding tracepoints.
parents 8efcf34a 3cd90214
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@

# The following is required for define_trace.h to find ./trace.h
CFLAGS_trace.o := -I$(src)
CFLAGS_vfio_ccw_fsm.o := -I$(src)

obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
	fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
+75 −65
Original line number Diff line number Diff line
@@ -23,9 +23,13 @@
#define CCWCHAIN_LEN_MAX	256

/* Bookkeeping for a contiguous guest I/O buffer pinned in host memory. */
struct pfn_array {
	/* Starting guest physical I/O address. */
	unsigned long		pa_iova;
	/* Array that stores PFNs of the pages that need to be pinned. */
	unsigned long		*pa_iova_pfn;
	/* Array that receives PFNs of the pages pinned. */
	unsigned long		*pa_pfn;
	/* Number of pages pinned from @pa_iova. */
	int			pa_nr;
};

@@ -46,70 +50,33 @@ struct ccwchain {
};

/*
 * pfn_array_pin() - pin user pages in memory
 * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @iova: target guest physical address
 * @len: number of bytes that should be pinned from @iova
 *
 * Attempt to pin user pages in memory.
 * Attempt to allocate memory for PFNs, and pin user pages in memory.
 *
 * Usage of pfn_array:
 * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
 * @pa->pa_iova_pfn array that stores PFNs of the pages that need to be
 *                  pinned. Allocated by caller.
 * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
 *                  caller.
 * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
 *                  caller.
 *                  number of pages pinned. Assigned by callee.
 * We expect (pa_nr == 0) and (pa_iova_pfn == NULL), any field in
 * this structure will be filled in by this function.
 *
 * Returns:
 *   Number of pages pinned on success.
 *   If @pa->pa_nr is 0 or negative, returns 0.
 *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
 *   returns -EINVAL.
 *   If no pages were pinned, returns -errno.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
	int i, ret;

	/* Nothing to do for an empty request; normalize a negative count. */
	if (pa->pa_nr <= 0) {
		pa->pa_nr = 0;
		return 0;
	}

	/* Fill pa_iova_pfn[] with consecutive PFNs starting at pa_iova. */
	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
	for (i = 1; i < pa->pa_nr; i++)
		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

	/* Partial pin: undo it and report that zero pages are pinned. */
	if (ret > 0 && ret != pa->pa_nr) {
		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
		pa->pa_nr = 0;
		return 0;
	}

	/* Either the full pa_nr pages pinned, or a -errno from vfio. */
	return ret;
}

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
	/* Unpin everything that was pinned, then release the PFN storage.
	 * NOTE(review): pa->pa_iova_pfn is left dangling after kfree();
	 * callers must not reuse @pa without resetting it.
	 */
	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
	pa->pa_nr = 0;
	kfree(pa->pa_iova_pfn);
}

/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
			       u64 iova, unsigned int len)
{
	int ret = 0;
	int i, ret = 0;

	if (!len)
		return 0;

	if (pa->pa_nr)
	if (pa->pa_nr || pa->pa_iova_pfn)
		return -EINVAL;

	pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
		return -ENOMEM;
	pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

	ret = pfn_array_pin(pa, mdev);
	pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
	for (i = 1; i < pa->pa_nr; i++)
		pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

	if (ret > 0)
		return ret;
	else if (!ret)
	ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
			     IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

	if (ret < 0) {
		goto err_out;
	} else if (ret > 0 && ret != pa->pa_nr) {
		vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
		ret = -EINVAL;
		goto err_out;
	}

	return ret;

err_out:
	pa->pa_nr = 0;
	kfree(pa->pa_iova_pfn);
	pa->pa_iova_pfn = NULL;

	return ret;
}

/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
	vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
	pa->pa_nr = 0;
	kfree(pa->pa_iova_pfn);
}

static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
	pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
 * This is the chain length not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * The program is also validated for absence of not yet supported
 * indirect data addressing scenarios.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
	do {
		cnt++;

		/*
		 * As we don't want to fail direct addressing even if the
		 * orb specified one of the unsupported formats, we defer
		 * checking for IDAWs in unsupported formats to here.
		 */
		if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
			return -EOPNOTSUPP;

		if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
			break;

@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
	struct ccw1 *ccw;
	struct pfn_array_table *pat;
	unsigned long *idaws;
	int idaw_nr;
	int ret;

	ccw = chain->ch_ccw + idx;

@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
	 * needed when translating a direct ccw to a idal ccw.
	 */
	pat = chain->ch_pat + idx;
	if (pfn_array_table_init(pat, 1))
		return -ENOMEM;
	idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
				      ccw->cda, ccw->count);
	if (idaw_nr < 0)
		return idaw_nr;
	ret = pfn_array_table_init(pat, 1);
	if (ret)
		goto out_init;

	ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
	if (ret < 0)
		goto out_init;

	/* Translate this direct ccw to a idal ccw. */
	idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
	idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
	if (!idaws) {
		pfn_array_table_unpin_free(pat, cp->mdev);
		return -ENOMEM;
		ret = -ENOMEM;
		goto out_unpin;
	}
	ccw->cda = (__u32) virt_to_phys(idaws);
	ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
	pfn_array_table_idal_create_words(pat, idaws);

	return 0;

out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
	ccw->cda = 0;
	return ret;
}

static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
	pat = chain->ch_pat + idx;
	ret = pfn_array_table_init(pat, idaw_nr);
	if (ret)
		return ret;
		goto out_init;

	/* Translate idal ccw to use new allocated idaws. */
	idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
	kfree(idaws);
out_unpin:
	pfn_array_table_unpin_free(pat, cp->mdev);
out_init:
	ccw->cda = 0;
	return ret;
}

@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
	/*
	 * XXX:
	 * Only support prefetch enable mode now.
	 * Only support 64bit addressing idal.
	 * Only support 4k IDAW.
	 */
	if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
	if (!orb->cmd.pfch)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
	ret = ccwchain_loop_tic(chain, cp);
	if (ret)
		cp_unpin_free(cp);
	/* It is safe to force c64 on: if it was not set but IDALs were
	 * used, ccwchain_calc_length has already returned an error.
	 */
	cp->orb.cmd.c64 = 1;

	return ret;
}
+4 −1
Original line number Diff line number Diff line
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return 0;
	return rc;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
+16 −1
Original line number Diff line number Diff line
@@ -13,6 +13,9 @@
#include "ioasm.h"
#include "vfio_ccw_private.h"

#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

static int fsm_io_helper(struct vfio_ccw_private *private)
{
	struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
	 */
	cio_disable_subchannel(sch);
}
/*
 * get_schid() - fetch the subchannel id of a vfio-ccw private's subchannel.
 * @p: the vfio-ccw private state
 *
 * Marked static: a plain "inline" definition at file scope has external
 * linkage with no prototype, which pollutes the global namespace and
 * triggers sparse's "symbol was not declared. Should it be static?"
 * warning.  The helper is only used within this file.
 */
static inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
{
	return p->sch->schid;
}

/*
 * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
	union scsw *scsw = &private->scsw;
	struct ccw_io_region *io_region = &private->io_region;
	struct mdev_device *mdev = private->mdev;
	char *errstr = "request";

	private->state = VFIO_CCW_STATE_BOXED;

@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		/* Don't try to build a cp if transport mode is specified. */
		if (orb->tm.b) {
			io_region->ret_code = -EOPNOTSUPP;
			errstr = "transport mode";
			goto err_out;
		}
		io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
					      orb);
		if (io_region->ret_code)
		if (io_region->ret_code) {
			errstr = "cp init";
			goto err_out;
		}

		io_region->ret_code = cp_prefetch(&private->cp);
		if (io_region->ret_code) {
			errstr = "cp prefetch";
			cp_free(&private->cp);
			goto err_out;
		}
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
		/* Start channel program and wait for I/O interrupt. */
		io_region->ret_code = fsm_io_helper(private);
		if (io_region->ret_code) {
			errstr = "cp fsm_io_helper";
			cp_free(&private->cp);
			goto err_out;
		}
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,

err_out:
	private->state = VFIO_CCW_STATE_IDLE;
	trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
			       io_region->ret_code, errstr);
}

/*
+54 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Tracepoints for vfio_ccw driver
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Halil Pasic <pasic@linux.vnet.ibm.com>
 */

#undef TRACE_SYSTEM
#define TRACE_SYSTEM vfio_ccw

#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
#define _VFIO_CCW_TRACE_

#include <linux/tracepoint.h>

/*
 * Emitted at the end of fsm_io_request() to record the outcome of a ccw
 * I/O request: the scsw function-control bits, the subchannel id, the
 * resulting error code and a short human-readable reason string.
 */
TRACE_EVENT(vfio_ccw_io_fctl,
	TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
	TP_ARGS(fctl, schid, errno, errstr),

	TP_STRUCT__entry(
		__field(int, fctl)
		__field_struct(struct subchannel_id, schid)
		__field(int, errno)
		/*
		 * Copy the string into the ring buffer instead of storing a
		 * bare char * — a pointer recorded in the event may dangle
		 * by the time the event is read from user space.
		 */
		__string(errstr, errstr)
	),

	TP_fast_assign(
		__entry->fctl = fctl;
		__entry->schid = schid;
		__entry->errno = errno;
		__assign_str(errstr, errstr);
	),

	TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
		  __entry->schid.cssid,
		  __entry->schid.ssid,
		  __entry->schid.sch_no,
		  __entry->fctl,
		  __entry->errno,
		  __get_str(errstr))
);

#endif /* _VFIO_CCW_TRACE_ */

/* This part must be outside protection */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE vfio_ccw_trace

#include <trace/define_trace.h>