
Commit ff9c895f authored by Alan Stern, committed by Greg Kroah-Hartman

USB: fix usbmon and DMA mapping for scatter-gather URBs



This patch (as1368) fixes a rather obscure bug in usbmon: When tracing
URBs sent by the scatter-gather library, it accesses the data buffers
while they are still mapped for DMA.
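
For background, this is the streaming-DMA ownership rule that usbmon was
violating, as a minimal generic sketch (dev, buf, len, and trace are
placeholders, not code from this patch): between map and unmap the
device owns the buffer, and the CPU must not read it.

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	/* ... the device DMAs into buf ... */
	memcpy(trace, buf, len);	/* BAD: CPU read while still mapped */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	memcpy(trace, buf, len);	/* OK: ownership is back with the CPU */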

The solution is to move the mapping and unmapping out of the s-g
library and into the usual place in hcd.c.  This requires the addition
of new URB flag bits to describe the kind of mapping needed, since we
have to call dma_map_sg() if the HCD supports native scatter-gather
operation and dma_map_page() if it doesn't.  The nice thing about
having the new flags is that they simplify the testing for unmapping.
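
Condensed from the new unmap_urb_for_dma() in the first hcd.c hunk
below (a sketch, not the complete function), unmapping becomes a simple
dispatch on those flags:

	if (urb->transfer_flags & URB_DMA_MAP_SG)
		dma_unmap_sg(hcd->self.controller, urb->sg->sg,
				urb->num_sgs, dir);
	else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
		dma_unmap_page(hcd->self.controller, urb->transfer_dma,
				urb->transfer_buffer_length, dir);
	else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
		dma_unmap_single(hcd->self.controller, urb->transfer_dma,
				urb->transfer_buffer_length, dir);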

The patch removes the only caller of usb_buffer_[un]map_sg(), so those
functions are #if'ed out.  A later patch will remove them entirely.

As a result of this change, urb->sg will be set in situations where
it wasn't set previously.  Hence the xhci and whci drivers are
adjusted to test urb->num_sgs instead, which retains its original
meaning and is nonzero only when the HCD has to handle a scatterlist.
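
Sketched from the whci hunk at the end of the diff, the idiom for an
HCD that must walk the scatterlist itself changes from testing the
pointer to testing the count:

	if (urb->num_sgs) {	/* was: if (urb->sg) */
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		...
	}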

Finally, even when a submission error occurs we don't want to hand
URBs to usbmon before they are unmapped.  The submission path is
rearranged so that map_urb_for_dma() is called only for non-root-hub
URBs and unmap_urb_for_dma() is called immediately after a submission
error.  This simplifies the error handling.
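
In outline (condensed from the last hcd.c hunk below):

	if (is_root_hub(urb->dev)) {
		status = rh_urb_enqueue(hcd, urb);	/* never DMA-mapped */
	} else {
		status = map_urb_for_dma(hcd, urb, mem_flags);
		if (likely(status == 0)) {
			status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
			if (unlikely(status))
				unmap_urb_for_dma(hcd, urb);
		}
	}
	/* any failure reaches usbmon only after the URB is unmapped */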

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
CC: <stable@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 0ff8d1b3
+105 −64
@@ -1259,6 +1259,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle,
	*dma_handle = 0;
}

+static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+	enum dma_data_direction dir;
+
+	if (urb->transfer_flags & URB_SETUP_MAP_SINGLE)
+		dma_unmap_single(hcd->self.controller,
+				urb->setup_dma,
+				sizeof(struct usb_ctrlrequest),
+				DMA_TO_DEVICE);
+	else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL)
+		hcd_free_coherent(urb->dev->bus,
+				&urb->setup_dma,
+				(void **) &urb->setup_packet,
+				sizeof(struct usb_ctrlrequest),
+				DMA_TO_DEVICE);
+
+	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	if (urb->transfer_flags & URB_DMA_MAP_SG)
+		dma_unmap_sg(hcd->self.controller,
+				urb->sg->sg,
+				urb->num_sgs,
+				dir);
+	else if (urb->transfer_flags & URB_DMA_MAP_PAGE)
+		dma_unmap_page(hcd->self.controller,
+				urb->transfer_dma,
+				urb->transfer_buffer_length,
+				dir);
+	else if (urb->transfer_flags & URB_DMA_MAP_SINGLE)
+		dma_unmap_single(hcd->self.controller,
+				urb->transfer_dma,
+				urb->transfer_buffer_length,
+				dir);
+	else if (urb->transfer_flags & URB_MAP_LOCAL)
+		hcd_free_coherent(urb->dev->bus,
+				&urb->transfer_dma,
+				&urb->transfer_buffer,
+				urb->transfer_buffer_length,
+				dir);
+
+	/* Make it safe to call this routine more than once */
+	urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+			URB_DMA_MAP_SG | URB_DMA_MAP_PAGE |
+			URB_DMA_MAP_SINGLE | URB_MAP_LOCAL);
+}
+
static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
			   gfp_t mem_flags)
{
@@ -1270,8 +1315,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
	 * unless it uses pio or talks to another transport,
	 * or uses the provided scatter gather list for bulk.
	 */
-	if (is_root_hub(urb->dev))
-		return 0;

	if (usb_endpoint_xfer_control(&urb->ep->desc)
	    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
@@ -1284,6 +1327,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
			if (dma_mapping_error(hcd->self.controller,
						urb->setup_dma))
				return -EAGAIN;
+			urb->transfer_flags |= URB_SETUP_MAP_SINGLE;
		} else if (hcd->driver->flags & HCD_LOCAL_MEM)
			ret = hcd_alloc_coherent(
					urb->dev->bus, mem_flags,
@@ -1291,12 +1335,46 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
					(void **)&urb->setup_packet,
					sizeof(struct usb_ctrlrequest),
					DMA_TO_DEVICE);
+			if (ret)
+				return ret;
+			urb->transfer_flags |= URB_SETUP_MAP_LOCAL;
	}

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	if (ret == 0 && urb->transfer_buffer_length != 0
+	if (urb->transfer_buffer_length != 0
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		if (hcd->self.uses_dma) {
+			if (urb->num_sgs) {
+				int n = dma_map_sg(
+						hcd->self.controller,
+						urb->sg->sg,
+						urb->num_sgs,
+						dir);
+				if (n <= 0)
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_SG;
+				if (n != urb->num_sgs) {
+					urb->num_sgs = n;
+					urb->transfer_flags |=
+							URB_DMA_SG_COMBINED;
+				}
+			} else if (urb->sg) {
+				struct scatterlist *sg;
+
+				sg = (struct scatterlist *) urb->sg;
+				urb->transfer_dma = dma_map_page(
+						hcd->self.controller,
+						sg_page(sg),
+						sg->offset,
+						urb->transfer_buffer_length,
+						dir);
+				if (dma_mapping_error(hcd->self.controller,
+						urb->transfer_dma))
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_PAGE;
+			} else {
				urb->transfer_dma = dma_map_single(
						hcd->self.controller,
						urb->transfer_buffer,
@@ -1304,7 +1382,10 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
						dir);
				if (dma_mapping_error(hcd->self.controller,
						urb->transfer_dma))
-				return -EAGAIN;
+					ret = -EAGAIN;
+				else
+					urb->transfer_flags |= URB_DMA_MAP_SINGLE;
+			}
		} else if (hcd->driver->flags & HCD_LOCAL_MEM) {
			ret = hcd_alloc_coherent(
					urb->dev->bus, mem_flags,
@@ -1312,55 +1393,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
					&urb->transfer_buffer,
					urb->transfer_buffer_length,
					dir);

-			if (ret && usb_endpoint_xfer_control(&urb->ep->desc)
-			    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP))
-				hcd_free_coherent(urb->dev->bus,
-					&urb->setup_dma,
-					(void **)&urb->setup_packet,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
+			if (ret == 0)
+				urb->transfer_flags |= URB_MAP_LOCAL;
		}
+		if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE |
+				URB_SETUP_MAP_LOCAL)))
+			unmap_urb_for_dma(hcd, urb);
	}
	return ret;
}

-static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
-	enum dma_data_direction dir;
-
-	if (is_root_hub(urb->dev))
-		return;
-
-	if (usb_endpoint_xfer_control(&urb->ep->desc)
-	    && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
-		if (hcd->self.uses_dma)
-			dma_unmap_single(hcd->self.controller, urb->setup_dma,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM)
-			hcd_free_coherent(urb->dev->bus, &urb->setup_dma,
-					(void **)&urb->setup_packet,
-					sizeof(struct usb_ctrlrequest),
-					DMA_TO_DEVICE);
-	}
-
-	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	if (urb->transfer_buffer_length != 0
-	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
-		if (hcd->self.uses_dma)
-			dma_unmap_single(hcd->self.controller,
-					urb->transfer_dma,
-					urb->transfer_buffer_length,
-					dir);
-		else if (hcd->driver->flags & HCD_LOCAL_MEM)
-			hcd_free_coherent(urb->dev->bus, &urb->transfer_dma,
-					&urb->transfer_buffer,
-					urb->transfer_buffer_length,
-					dir);
-	}
-}

/*-------------------------------------------------------------------------*/

/* may be called in any context with a valid urb->dev usecount
@@ -1389,21 +1431,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
	 * URBs must be submitted in process context with interrupts
	 * enabled.
	 */
-	status = map_urb_for_dma(hcd, urb, mem_flags);
-	if (unlikely(status)) {
-		usbmon_urb_submit_error(&hcd->self, urb, status);
-		goto error;
-	}

-	if (is_root_hub(urb->dev))
+	if (is_root_hub(urb->dev)) {
		status = rh_urb_enqueue(hcd, urb);
-	else
+	} else {
+		status = map_urb_for_dma(hcd, urb, mem_flags);
+		if (likely(status == 0)) {
+			status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
+			if (unlikely(status))
+				unmap_urb_for_dma(hcd, urb);
+		}
+	}

	if (unlikely(status)) {
		usbmon_urb_submit_error(&hcd->self, urb, status);
+		unmap_urb_for_dma(hcd, urb);
 error:
		urb->hcpriv = NULL;
		INIT_LIST_HEAD(&urb->urb_list);
		atomic_dec(&urb->use_count);
+10 −35
@@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io)
		kfree(io->urbs);
		io->urbs = NULL;
	}
-	if (io->dev->dev.dma_mask != NULL)
-		usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe),
-				    io->sg, io->nents);
	io->dev = NULL;
}

@@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
{
	int i;
	int urb_flags;
-	int dma;
	int use_sg;

	if (!io || !dev || !sg
@@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
	io->pipe = pipe;
	io->sg = sg;
	io->nents = nents;

-	/* not all host controllers use DMA (like the mainstream pci ones);
-	 * they can use PIO (sl811) or be software over another transport.
-	 */
-	dma = (dev->dev.dma_mask != NULL);
-	if (dma)
-		io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe),
-						sg, nents);
-	else
	io->entries = nents;

	/* initialize all the urbs we'll use */
-	if (io->entries <= 0)
-		return io->entries;

	if (dev->bus->sg_tablesize > 0) {
		io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
		use_sg = true;
@@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
		goto nomem;

	urb_flags = 0;
-	if (dma)
-		urb_flags |= URB_NO_TRANSFER_DMA_MAP;
	if (usb_pipein(pipe))
		urb_flags |= URB_SHORT_NOT_OK;

@@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,

		io->urbs[0]->complete = sg_complete;
		io->urbs[0]->context = io;

		/* A length of zero means transfer the whole sg list */
		io->urbs[0]->transfer_buffer_length = length;
		if (length == 0) {
			for_each_sg(sg, sg, io->entries, i) {
				io->urbs[0]->transfer_buffer_length +=
-					sg_dma_len(sg);
+					sg->length;
			}
		}
		io->urbs[0]->sg = io;
@@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
			io->urbs[i]->context = io;

			/*
-			 * Some systems need to revert to PIO when DMA is temporarily
-			 * unavailable.  For their sakes, both transfer_buffer and
-			 * transfer_dma are set when possible.
-			 *
-			 * Note that if IOMMU coalescing occurred, we cannot
-			 * trust sg_page anymore, so check if S/G list shrunk.
+			 * Some systems can't use DMA; they use PIO instead.
+			 * For their sakes, transfer_buffer is set whenever
+			 * possible.
			 */
-			if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
+			if (!PageHighMem(sg_page(sg)))
				io->urbs[i]->transfer_buffer = sg_virt(sg);
			else
				io->urbs[i]->transfer_buffer = NULL;

-			if (dma) {
-				io->urbs[i]->transfer_dma = sg_dma_address(sg);
-				len = sg_dma_len(sg);
-			} else {
-				/* hc may use _only_ transfer_buffer */
			len = sg->length;
-			}

			if (length) {
				len = min_t(unsigned, len, length);
				length -= len;
@@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
					io->entries = i + 1;
			}
			io->urbs[i]->transfer_buffer_length = len;

+			io->urbs[i]->sg = (struct usb_sg_request *) sg;
		}
		io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT;
	}
+6 −3
@@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

-	/* Cache the direction for later use */
-	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
-			(is_out ? URB_DIR_OUT : URB_DIR_IN);
+	/* Clear the internal flags and cache the direction for later use */
+	urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
+			URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
+			URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
+			URB_DMA_SG_COMBINED);
+	urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
+4 −0
@@ -881,6 +881,7 @@ void usb_buffer_unmap(struct urb *urb)
EXPORT_SYMBOL_GPL(usb_buffer_unmap);
#endif  /*  0  */

+#if 0
/**
 * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint
 * @dev: device to which the scatterlist will be mapped
@@ -924,6 +925,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);
+#endif

/* XXX DISABLED, no users currently.  If you wish to re-enable this
 * XXX please determine whether the sync is to transfer ownership of
@@ -960,6 +962,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg);
#endif

+#if 0
/**
 * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist
 * @dev: device to which the scatterlist will be mapped
@@ -985,6 +988,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
			is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg);
+#endif

/* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */
#ifdef MODULE
+1 −1
@@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

-	if (urb->sg) {
+	if (urb->num_sgs) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);