Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 269a1b78 authored by Danny Segal
Browse files

usb: gadget: Add support for DMA mapping optimization of request buffers



In order to improve performance, this patch enables mapping of USB request
buffers in advance, before queueing them to the HW. This can be used when
there is a pool of pre-mapped request buffers, so we don't need to map them
again every time a request gets queued.

Change-Id: Ic3a6db749343ef1345e0816210517755aee055b3
Signed-off-by: Danny Segal <dsegal@codeaurora.org>
parent 77ea4f2d
Loading
Loading
Loading
Loading
+6 −7
Original line number Diff line number Diff line
@@ -75,7 +75,6 @@
 * DEFINE
 *****************************************************************************/

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
#define USB_MAX_TIMEOUT		25 /* 25msec timeout */
#define EP_PRIME_CHECK_DELAY	(jiffies + msecs_to_jiffies(1000))
#define MAX_PRIME_CHECK_RETRY	3 /*Wait for 3sec for EP prime failure */
@@ -1955,7 +1954,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
		return -EALREADY;

	mReq->req.status = -EALREADY;
	if (length && mReq->req.dma == DMA_ADDR_INVALID) {
	if (length && mReq->req.dma == DMA_ERROR_CODE) {
		mReq->req.dma = \
			dma_map_single(mEp->device, mReq->req.buf,
				       length, mEp->dir ? DMA_TO_DEVICE :
@@ -1974,7 +1973,7 @@ static int _hardware_enqueue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
				dma_unmap_single(mEp->device, mReq->req.dma,
					length, mEp->dir ? DMA_TO_DEVICE :
					DMA_FROM_DEVICE);
				mReq->req.dma = DMA_ADDR_INVALID;
				mReq->req.dma = DMA_ERROR_CODE;
				mReq->map     = 0;
			}
			return -ENOMEM;
@@ -2186,7 +2185,7 @@ static int _hardware_dequeue(struct ci13xxx_ep *mEp, struct ci13xxx_req *mReq)
	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->req.dma = DMA_ERROR_CODE;
		mReq->map     = 0;
	}

@@ -2318,7 +2317,7 @@ static void release_ep_request(struct ci13xxx_ep *mEp,
		dma_unmap_single(mEp->device, mReq->req.dma,
			mReq->req.length,
			mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->req.dma = DMA_ERROR_CODE;
		mReq->map     = 0;
	}

@@ -3172,7 +3171,7 @@ static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
	mReq = kzalloc(sizeof(struct ci13xxx_req), gfp_flags);
	if (mReq != NULL) {
		INIT_LIST_HEAD(&mReq->queue);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->req.dma = DMA_ERROR_CODE;

		mReq->ptr = dma_pool_alloc(mEp->td_pool, gfp_flags,
					   &mReq->dma);
@@ -3389,7 +3388,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
	if (mReq->map) {
		dma_unmap_single(mEp->device, mReq->req.dma, mReq->req.length,
				 mEp->dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		mReq->req.dma = DMA_ADDR_INVALID;
		mReq->req.dma = DMA_ERROR_CODE;
		mReq->map     = 0;
	}
	req->status = -ECONNRESET;
+10 −3
Original line number Diff line number Diff line
@@ -68,7 +68,7 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
		}

		req->num_mapped_sgs = mapped;
	} else {
	} else if (!req->dma_pre_mapped) {
		req->dma = dma_map_single(&gadget->dev, req->buf, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

@@ -93,9 +93,16 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		req->num_mapped_sgs = 0;
	} else {
	} else if (!req->dma_pre_mapped && req->dma != DMA_ERROR_CODE) {
		/*
		 * If the DMA address has not been mapped by a higher layer,
		 * then unmap it here. Otherwise, the DMA address will be
		 * unmapped by the upper layer (where the request was queued).
		 */
		dma_unmap_single(&gadget->dev, req->dma, req->length,
			is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		req->dma = DMA_ERROR_CODE;
	}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
+6 −0
Original line number Diff line number Diff line
@@ -46,6 +46,11 @@ struct usb_ep;
 *     by adding a zero length packet as needed;
 * @short_not_ok: When reading data, makes short packets be
 *     treated as errors (queue stops advancing till cleanup).
 * @dma_pre_mapped: Tells the USB core driver whether this request should be
 *	DMA-mapped before it is queued to the USB HW. When set to true, it means
 *	that the request has already been mapped in advance and therefore the
 *	USB core driver does NOT need to do DMA-mapping when the request is
 *	queued to the USB HW.
 * @complete: Function called when request completes, so this request and
 *	its buffer may be re-used.  The function will always be called with
 *	interrupts disabled, and it must not sleep.
@@ -102,6 +107,7 @@ struct usb_request {
	unsigned		no_interrupt:1;
	unsigned		zero:1;
	unsigned		short_not_ok:1;
	unsigned		dma_pre_mapped:1;

	void			(*complete)(struct usb_ep *ep,
					struct usb_request *req);