Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3ffbba95 authored by Sarah Sharp's avatar Sarah Sharp Committed by Greg Kroah-Hartman
Browse files

USB: xhci: Allocate and address USB devices



xHCI needs to get a "Slot ID" from the host controller and allocate other
data structures for every USB device.  Make usb_alloc_dev() and
usb_release_dev() allocate and free these device structures.  After
setting up the xHC device structures, usb_alloc_dev() must wait for the
hardware to respond to an Enable Slot command.  usb_release_dev() fires off
a Disable Slot command and does not wait for it to complete.

When the USB core wants to choose an address for the device, the xHCI
driver must issue a Set Address command and wait for an event for that
command.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent c6515272
Loading
Loading
Loading
Loading
+79 −0
Original line number Diff line number Diff line
@@ -410,3 +410,82 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
	val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]);
	xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val);
}

/*
 * Dump a device context (control fields, slot context, and endpoint contexts
 * up to last_ep) to the debug log, printing the virtual address, DMA address,
 * and value of every 32-bit field.
 */
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
{
	int i, j;
	int last_ep_ctx = 31;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;

	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - drop flags\n",
			(unsigned int) &ctx->drop_flags,
			dma, ctx->drop_flags);
	dma += field_size;
	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - add flags\n",
			(unsigned int) &ctx->add_flags,
			dma, ctx->add_flags);
	dma += field_size;
	/* Was "i > 6": the loop never executed, so the reserved fields went
	 * unprinted and dma fell 24 bytes behind for all the output below.
	 */
	for (i = 0; i < 6; ++i) {
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
				(unsigned int) &ctx->rsvd[i],
				dma, ctx->rsvd[i], i);
		dma += field_size;
	}

	xhci_dbg(xhci, "Slot Context:\n");
	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info\n",
			(unsigned int) &ctx->slot.dev_info,
			dma, ctx->slot.dev_info);
	dma += field_size;
	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info2\n",
			(unsigned int) &ctx->slot.dev_info2,
			dma, ctx->slot.dev_info2);
	dma += field_size;
	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tt_info\n",
			(unsigned int) &ctx->slot.tt_info,
			dma, ctx->slot.tt_info);
	dma += field_size;
	xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_state\n",
			(unsigned int) &ctx->slot.dev_state,
			dma, ctx->slot.dev_state);
	dma += field_size;
	/* Was "i > 4": same never-executing loop bug as above */
	for (i = 0; i < 4; ++i) {
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
				(unsigned int) &ctx->slot.reserved[i],
				dma, ctx->slot.reserved[i], i);
		dma += field_size;
	}

	/* Print one context past the last valid endpoint context */
	if (last_ep < 31)
		last_ep_ctx = last_ep + 1;
	for (i = 0; i < last_ep_ctx; ++i) {
		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info\n",
				(unsigned int) &ctx->ep[i].ep_info,
				dma, ctx->ep[i].ep_info);
		dma += field_size;
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info2\n",
				(unsigned int) &ctx->ep[i].ep_info2,
				dma, ctx->ep[i].ep_info2);
		dma += field_size;
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[0]\n",
				(unsigned int) &ctx->ep[i].deq[0],
				dma, ctx->ep[i].deq[0]);
		dma += field_size;
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[1]\n",
				(unsigned int) &ctx->ep[i].deq[1],
				dma, ctx->ep[i].deq[1]);
		dma += field_size;
		xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tx_info\n",
				(unsigned int) &ctx->ep[i].tx_info,
				dma, ctx->ep[i].tx_info);
		dma += field_size;
		for (j = 0; j < 3; ++j) {
			xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n",
					(unsigned int) &ctx->ep[i].reserved[j],
					dma, ctx->ep[i].reserved[j], j);
			dma += field_size;
		}
	}
}
+201 −0
Original line number Diff line number Diff line
@@ -318,6 +318,16 @@ void event_ring_work(unsigned long arg)
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (xhci->devs[i]) {
			for (j = 0; j < 31; ++j) {
				if (xhci->devs[i]->ep_rings[j]) {
					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
				}
			}
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (setup_one_noop(xhci))
@@ -499,6 +509,197 @@ void xhci_shutdown(struct usb_hcd *hcd)

/*-------------------------------------------------------------------------*/

/*
 * Called when a struct usb_device is going away: the device has
 * disconnected, all traffic has been stopped, and its endpoints have been
 * disabled.  Ask the hardware to release the slot; the command completion
 * handler tears down the per-device data structures once the Disable Slot
 * command finishes.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long irq_flags;

	/* Slot 0 is reserved; a device without a slot has nothing to free */
	if (!udev->slot_id)
		return;

	spin_lock_irqsave(&xhci->lock, irq_flags);
	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id) != 0) {
		spin_unlock_irqrestore(&xhci->lock, irq_flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, irq_flags);
	/*
	 * No waiting here: the event handler for the Disable Slot command
	 * completion frees any data structures associated with the slot.
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripherial? */
	spin_unlock_irqrestore(&xhci->lock, flags);
	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 *
 * Returns 0 on success; -EINVAL for a bad slot or a failed command, -EPROTO
 * if the device did not respond, -ETIME on command timeout.
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp;

	/* A slot must have been enabled before the device can be addressed */
	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */

	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		/* Fixed copy-and-paste message: this path waits on an Address
		 * Device command, not on slot assignment.
		 */
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* cmd_status was filled in by the command completion handler */
	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	/* Debug dump of the DCBAA registers and both device contexts */
	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]);
	xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp);
	temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n",
			udev->slot_id,
			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id],
			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n",
			udev->slot_id,
			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1],
			xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]);
	xhci_dbg(xhci, "Output Context DMA address = %#08x\n",
			virt_dev->out_ctx_dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
	/* FIXME: Zero the input context control for later use? */
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+199 −5
Original line number Diff line number Diff line
@@ -188,12 +188,187 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
	return 0;
}

void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *dev;
	int i;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || !xhci->devs[slot_id])
		return;

	dev = xhci->devs[slot_id];
	xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0;
	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;
	if (!dev)
		return;

	for (i = 0; i < 31; ++i)
		if (dev->ep_rings[i])
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		dma_pool_free(xhci->device_pool,
				dev->in_ctx, dev->in_ctx_dma);
	if (dev->out_ctx)
		dma_pool_free(xhci->device_pool,
				dev->out_ctx, dev->out_ctx_dma);
	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = 0;
}

/*
 * Allocate the per-slot xHCI data structures: an xhci_virt_device, input and
 * output device contexts from the device context DMA pool, and an endpoint 0
 * transfer ring.  On success the slot's DCBAA entry is pointed at the output
 * device context.
 *
 * Returns 1 on success, 0 on a bad slot ID or allocation failure; on failure
 * any partially-allocated state is torn down via xhci_free_virt_device().
 */
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	dma_addr_t	dma;
	struct xhci_virt_device *dev;

	/* Slot ID 0 is reserved */
	if (slot_id == 0 || xhci->devs[slot_id]) {
		xhci_warn(xhci, "Bad Slot ID %d\n", slot_id);
		return 0;
	}

	/* kzalloc so unset fields (ep_rings[], cmd_status, ...) start zeroed */
	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
	if (!xhci->devs[slot_id])
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC */
	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->out_ctx)
		goto fail;
	dev->out_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma);
	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	if (!dev->in_ctx)
		goto fail;
	dev->in_ctx_dma = dma;
	xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma);
	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));

	/* Allocate endpoint 0 ring */
	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
	if (!dev->ep_rings[0])
		goto fail;

	/*
	 * Point to output device context in dcbaa; skip the output control
	 * context, which is eight 32 bit fields (or 32 bytes long)
	 */
	/* NOTE(review): the debug line below prints out_ctx_dma, but the value
	 * actually stored includes the +32 offset — the log understates the
	 * stored pointer by 32 bytes.  Confirm which value is intended.
	 */
	xhci->dcbaa->dev_context_ptrs[2*slot_id] =
		(u32) dev->out_ctx_dma + (32);
	xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n",
			slot_id,
			(unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id],
			dev->out_ctx_dma);
	/* High half of the 64-bit DCBAA entry is cleared */
	xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0;

	return 1;
fail:
	/* Frees whatever was allocated above and clears xhci->devs[slot_id] */
	xhci_free_virt_device(xhci, slot_id);
	return 0;
}

/*
 * Setup an xHCI virtual device for a Set Address command: fill in the input
 * device context (slot context and endpoint 0 context) from the USB core's
 * view of the device — speed, route, root hub port, and TT info for LS/FS
 * devices below a high-speed hub.
 *
 * Returns 0 on success, -EINVAL for an unassigned slot or an unsupported
 * (wireless) speed.
 */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
	if (udev->slot_id == 0 || !dev) {
		xhci_warn(xhci, "Slot ID %d is not assigned to this device\n",
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = &dev->in_ctx->ep[0];

	/* 2) New slot context and endpoint 0 context are valid*/
	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	dev->in_ctx->slot.dev_info |= LAST_CTX(1);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		dev->in_ctx->slot.dev_info |= (u32) udev->route;
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
		return -EINVAL;
		/* (removed an unreachable "break" that followed the return) */
	default:
		/* Speed was set earlier, this shouldn't happen. */
		BUG();
	}
	/* Find the root hub port this device is under */
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
	/*
	 * FIXME: I don't think this is right, where does the TT info for the
	 * roothub or parent hub come from?
	 */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
	}
	xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);

	/* Step 4 - ring already allocated */
	/* Step 5 */
	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
	/*
	 * See section 4.3 bullet 6:
	 * The default Max Packet size for ep0 is "8 bytes for a USB2
	 * LS/FS/HS device or 512 bytes for a USB3 SS device"
	 * XXX: Not sure about wireless USB devices.
	 */
	if (udev->speed == USB_SPEED_SUPER)
		ep0_ctx->ep_info2 |= MAX_PACKET(512);
	else
		ep0_ctx->ep_info2 |= MAX_PACKET(8);
	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
	ep0_ctx->ep_info2 |= MAX_BURST(0);
	ep0_ctx->ep_info2 |= ERROR_COUNT(3);

	/* Dequeue pointer starts at the ring's first segment, with the
	 * producer cycle state folded into the low bits.
	 */
	ep0_ctx->deq[0] =
		dev->ep_rings[0]->first_seg->dma;
	ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state;
	ep0_ctx->deq[1] = 0;

	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */

	return 0;
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	int size;

	/* XXX: Free all the segments in the various rings */
	int i;

	/* Free the Event Ring Segment Table and the actual Event Ring */
	xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
@@ -218,16 +393,27 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
		xhci_ring_free(xhci, xhci->cmd_ring);
	xhci->cmd_ring = NULL;
	xhci_dbg(xhci, "Freed command ring\n");

	for (i = 1; i < MAX_HC_SLOTS; ++i)
		xhci_free_virt_device(xhci, i);

	if (xhci->segment_pool)
		dma_pool_destroy(xhci->segment_pool);
	xhci->segment_pool = NULL;
	xhci_dbg(xhci, "Freed segment pool\n");

	if (xhci->device_pool)
		dma_pool_destroy(xhci->device_pool);
	xhci->device_pool = NULL;
	xhci_dbg(xhci, "Freed device context pool\n");

	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]);
	if (xhci->dcbaa)
		pci_free_consistent(pdev, sizeof(*xhci->dcbaa),
				xhci->dcbaa, xhci->dcbaa->dma);
	xhci->dcbaa = NULL;

	xhci->page_size = 0;
	xhci->page_shift = 0;
}
@@ -280,8 +466,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
		goto fail;
	memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
	xhci->dcbaa->dma = dma;
	xhci_dbg(xhci, "// Setting device context base array address to 0x%x\n",
			xhci->dcbaa->dma);
	xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n",
			xhci->dcbaa->dma, (unsigned int) xhci->dcbaa);
	xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]);
	xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]);

@@ -293,7 +479,12 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);
	if (!xhci->segment_pool)
	/* See Table 46 and Note on Figure 55 */
	/* FIXME support 64-byte contexts */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			sizeof(struct xhci_device_control),
			64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

	/* Set up the command ring to have one segments for now. */
@@ -385,6 +576,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	 * something other than the default (~1ms minimum between interrupts).
	 * See section 5.5.1.2.
	 */
	init_completion(&xhci->addr_dev);
	for (i = 0; i < MAX_HC_SLOTS; ++i)
		xhci->devs[i] = 0;

	return 0;
fail:
+7 −0
Original line number Diff line number Diff line
@@ -108,6 +108,13 @@ static const struct hc_driver xhci_pci_hc_driver = {
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.address_device =	xhci_address_device,

	/*
	 * scheduling support
	 */
+30 −4
Original line number Diff line number Diff line
@@ -252,13 +252,10 @@ void ring_cmd_db(struct xhci_hcd *xhci)
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	/* Check completion code */
	if (GET_COMP_CODE(event->status) != COMP_SUCCESS)
		xhci_dbg(xhci, "WARN: unsuccessful no-op command\n");

	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
@@ -273,6 +270,21 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
@@ -400,3 +412,17 @@ void *setup_one_noop(struct xhci_hcd *xhci)
	xhci->noops_submitted++;
	return ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	/* Only the control field carries information for this command TRB */
	u32 control = TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, 0, 0, 0, control);
}

/* Queue an address device command TRB */
int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
{
	/* First TRB field carries the input device context pointer */
	u32 control = TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id);

	return queue_command(xhci, in_ctx_ptr, 0, 0, control);
}
Loading