
Commit d115b048 authored by John Youn, committed by Greg Kroah-Hartman

USB: xhci: Support for 64-byte contexts



Adds support for controllers that use 64-byte contexts.  The following context
data structures are affected by this: Device, Input, Input Control, Endpoint,
and Slot.  To accommodate the use of either 32- or 64-byte contexts, a Device or
Input context can only be accessed through functions that look up and return
pointers to their contained contexts.

Signed-off-by: John Youn <johnyoun@synopsys.com>
Acked-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 28c2d2ef
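
The heart of the change is that Device and Input contexts now live inside a struct xhci_container_ctx and are reached only through look-up helpers, so callers no longer care whether the controller uses 32- or 64-byte contexts. Below is a minimal caller-side sketch using only the helpers introduced in this commit (xhci_alloc_container_ctx, xhci_get_input_control_ctx, xhci_get_slot_ctx, xhci_get_ep_ctx, xhci_free_container_ctx); the wrapper function and its name are hypothetical, and locking and error paths are omitted.

static int example_setup_input_ctx(struct xhci_hcd *xhci, unsigned int ep_index)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_ep_ctx *ep_ctx;

	/* The container hides whether the HC uses 32- or 64-byte contexts */
	in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, GFP_KERNEL);
	if (!in_ctx)
		return -ENOMEM;

	/* Sub-contexts are looked up, never reached via fixed struct offsets */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);

	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
	slot_ctx->dev_info |= LAST_CTX(1);
	ep_ctx->ep_info = 0;

	/* The address programmed into the controller comes from the container,
	 * e.g. xhci_queue_address_device(xhci, in_ctx->dma, slot_id) in the
	 * real driver; this standalone sketch simply frees the context again.
	 */
	xhci_free_container_ctx(xhci, in_ctx);
	return 0;
}
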
+80 −45
@@ -393,103 +393,138 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
			upper_32_bits(val));
}

dma_addr_t xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot, dma_addr_t dma)
/* Print the last 32 bytes for 64-byte contexts */
static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
{
	int i;
	for (i = 0; i < 4; ++i) {
		xhci_dbg(xhci, "@%p (virt) @%08llx "
			 "(dma) %#08llx - rsvd64[%d]\n",
			 &ctx[4 + i], (unsigned long long)dma,
			 ctx[4 + i], i);
		dma += 8;
	}
}

void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int i;

	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	xhci_dbg(xhci, "Slot Context:\n");
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n",
			&slot->dev_info,
			(unsigned long long)dma, slot->dev_info);
			&slot_ctx->dev_info,
			(unsigned long long)dma, slot_ctx->dev_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n",
			&slot->dev_info2,
			(unsigned long long)dma, slot->dev_info2);
			&slot_ctx->dev_info2,
			(unsigned long long)dma, slot_ctx->dev_info2);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n",
			&slot->tt_info,
			(unsigned long long)dma, slot->tt_info);
			&slot_ctx->tt_info,
			(unsigned long long)dma, slot_ctx->tt_info);
	dma += field_size;
	xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n",
			&slot->dev_state,
			(unsigned long long)dma, slot->dev_state);
			&slot_ctx->dev_state,
			(unsigned long long)dma, slot_ctx->dev_state);
	dma += field_size;
	for (i = 0; i < 4; ++i) {
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
				&slot->reserved[i], (unsigned long long)dma,
				slot->reserved[i], i);
				&slot_ctx->reserved[i], (unsigned long long)dma,
				slot_ctx->reserved[i], i);
		dma += field_size;
	}

	return dma;
	if (csz)
		dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}

dma_addr_t xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep, dma_addr_t dma, unsigned int last_ep)
void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
		     struct xhci_container_ctx *ctx,
		     unsigned int last_ep)
{
	int i, j;
	int last_ep_ctx = 31;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (last_ep < 31)
		last_ep_ctx = last_ep + 1;
	for (i = 0; i < last_ep_ctx; ++i) {
		struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
		dma_addr_t dma = ctx->dma +
			((unsigned long)ep_ctx - (unsigned long)ctx);

		xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",
				&ep[i].ep_info,
				(unsigned long long)dma, ep[i].ep_info);
				&ep_ctx->ep_info,
				(unsigned long long)dma, ep_ctx->ep_info);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n",
				&ep[i].ep_info2,
				(unsigned long long)dma, ep[i].ep_info2);
				&ep_ctx->ep_info2,
				(unsigned long long)dma, ep_ctx->ep_info2);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n",
				&ep[i].deq,
				(unsigned long long)dma, ep[i].deq);
				&ep_ctx->deq,
				(unsigned long long)dma, ep_ctx->deq);
		dma += 2*field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n",
				&ep[i].tx_info,
				(unsigned long long)dma, ep[i].tx_info);
				&ep_ctx->tx_info,
				(unsigned long long)dma, ep_ctx->tx_info);
		dma += field_size;
		for (j = 0; j < 3; ++j) {
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
					&ep[i].reserved[j],
					&ep_ctx->reserved[j],
					(unsigned long long)dma,
					ep[i].reserved[j], j);
					ep_ctx->reserved[j], j);
			dma += field_size;
		}

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ep_ctx, dma);
	}
	return dma;
}

void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep)
void xhci_dbg_ctx(struct xhci_hcd *xhci,
		  struct xhci_container_ctx *ctx,
		  unsigned int last_ep)
{
	int i;
	/* Fields are 32 bits wide, DMA addresses are in bytes */
	int field_size = 32 / 8;
	struct xhci_slot_ctx *slot_ctx;
	dma_addr_t dma = ctx->dma;
	int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);

	if (ctx->type == XHCI_CTX_TYPE_INPUT) {
		struct xhci_input_control_ctx *ctrl_ctx =
			xhci_get_input_control_ctx(xhci, ctx);
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n",
			&ctx->drop_flags, (unsigned long long)dma,
			ctx->drop_flags);
			 &ctrl_ctx->drop_flags, (unsigned long long)dma,
			 ctrl_ctx->drop_flags);
		dma += field_size;
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n",
			&ctx->add_flags, (unsigned long long)dma,
			ctx->add_flags);
			 &ctrl_ctx->add_flags, (unsigned long long)dma,
			 ctrl_ctx->add_flags);
		dma += field_size;
		for (i = 0; i < 6; ++i) {
		xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n",
				&ctx->rsvd[i], (unsigned long long)dma,
				ctx->rsvd[i], i);
			xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n",
				 &ctrl_ctx->rsvd2[i], (unsigned long long)dma,
				 ctrl_ctx->rsvd2[i], i);
			dma += field_size;
		}
	dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma);
	dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);

		if (csz)
			dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma);
	}

void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep)
{
	dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma);
	dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep);
	slot_ctx = xhci_get_slot_ctx(xhci, ctx);
	xhci_dbg_slot_ctx(xhci, ctx);
	xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
+71 −50
@@ -722,7 +722,9 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
@@ -750,31 +752,34 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	in_ctx->drop_flags |= drop_flag;
	new_drop_flags = in_ctx->drop_flags;
	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	in_ctx->add_flags = ~drop_flag;
	new_add_flags = in_ctx->add_flags;
	ctrl_ctx->add_flags = ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags);
	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

@@ -804,9 +809,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_device_control *in_ctx;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
@@ -839,12 +846,14 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index];
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
@@ -862,8 +871,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		return -ENOMEM;
	}

	in_ctx->add_flags |= added_ctxs;
	new_add_flags = in_ctx->add_flags;
	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -871,14 +880,15 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = in_ctx->drop_flags;
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
		in_ctx->slot.dev_info |= LAST_CTX(last_ctx);
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = in_ctx->slot.dev_info;
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;
@@ -892,9 +902,11 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
@@ -902,13 +914,15 @@ static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev)
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	virt_dev->in_ctx->drop_flags = 0;
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1);
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = &virt_dev->in_ctx->ep[i];
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
@@ -934,6 +948,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device	*virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
@@ -949,16 +965,18 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	virt_dev->in_ctx->add_flags |= SLOT_FLAG;
	virt_dev->in_ctx->add_flags &= ~EP0_FLAG;
	virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG;
	virt_dev->in_ctx->drop_flags &= ~EP0_FLAG;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1013,10 +1031,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(virt_dev);
	xhci_zero_in_ctx(xhci, virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
@@ -1054,7 +1072,7 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
			virt_dev->new_ep_rings[i] = NULL;
		}
	}
	xhci_zero_in_ctx(virt_dev);
	xhci_zero_in_ctx(xhci, virt_dev);
}

/* Deal with stalled endpoints.  The core should have sent the control message
@@ -1187,6 +1205,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
@@ -1201,10 +1221,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1246,7 +1266,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
@@ -1261,19 +1281,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
			(unsigned long long)
				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx_dma);
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	virt_dev->in_ctx->add_flags = 0;
	virt_dev->in_ctx->drop_flags = 0;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
@@ -1315,7 +1337,6 @@ static int __init xhci_hcd_init(void)
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+87 −34
@@ -189,6 +189,63 @@ fail:
	return 0;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)

struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
						    int type, gfp_t flags)
{
	struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags);
	if (!ctx)
		return NULL;

	BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT));
	ctx->type = type;
	ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024;
	if (type == XHCI_CTX_TYPE_INPUT)
		ctx->size += CTX_SIZE(xhci->hcc_params);

	ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma);
	memset(ctx->bytes, 0, ctx->size);
	return ctx;
}

void xhci_free_container_ctx(struct xhci_hcd *xhci,
			     struct xhci_container_ctx *ctx)
{
	dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
	kfree(ctx);
}

struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci,
					      struct xhci_container_ctx *ctx)
{
	BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT);
	return (struct xhci_input_control_ctx *)ctx->bytes;
}

struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
					struct xhci_container_ctx *ctx)
{
	if (ctx->type == XHCI_CTX_TYPE_DEVICE)
		return (struct xhci_slot_ctx *)ctx->bytes;

	return (struct xhci_slot_ctx *)
		(ctx->bytes + CTX_SIZE(xhci->hcc_params));
}

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
				    struct xhci_container_ctx *ctx,
				    unsigned int ep_index)
{
	/* increment ep index by offset of start of ep ctx array */
	ep_index++;
	if (ctx->type == XHCI_CTX_TYPE_INPUT)
		ep_index++;

	return (struct xhci_ep_ctx *)
		(ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
}

/* All the xhci_tds in the ring's TD list should be freed at this point */
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
{
@@ -209,11 +266,10 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
			xhci_ring_free(xhci, dev->ep_rings[i]);

	if (dev->in_ctx)
		dma_pool_free(xhci->device_pool,
				dev->in_ctx, dev->in_ctx_dma);
		xhci_free_container_ctx(xhci, dev->in_ctx);
	if (dev->out_ctx)
		dma_pool_free(xhci->device_pool,
				dev->out_ctx, dev->out_ctx_dma);
		xhci_free_container_ctx(xhci, dev->out_ctx);

	kfree(xhci->devs[slot_id]);
	xhci->devs[slot_id] = 0;
}
@@ -221,7 +277,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		struct usb_device *udev, gfp_t flags)
{
	dma_addr_t	dma;
	struct xhci_virt_device *dev;

	/* Slot ID 0 is reserved */
@@ -235,26 +290,21 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
		return 0;
	dev = xhci->devs[slot_id];

	/* Allocate the (output) device context that will be used in the HC.
	 * The structure is 32 bytes smaller than the input context, but that's
	 * fine.
	 */
	dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	/* Allocate the (output) device context that will be used in the HC. */
	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
	if (!dev->out_ctx)
		goto fail;
	dev->out_ctx_dma = dma;

	xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->out_ctx, 0, sizeof(*dev->out_ctx));
			(unsigned long long)dev->out_ctx->dma);

	/* Allocate the (input) device context for address device command */
	dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma);
	dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags);
	if (!dev->in_ctx)
		goto fail;
	dev->in_ctx_dma = dma;

	xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
			(unsigned long long)dma);
	memset(dev->in_ctx, 0, sizeof(*dev->in_ctx));
			(unsigned long long)dev->in_ctx->dma);

	/* Allocate endpoint 0 ring */
	dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
@@ -264,7 +314,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
	init_completion(&dev->cmd_completion);

	/* Point to output device context in dcbaa. */
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx_dma;
	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
			slot_id,
			&xhci->dcbaa->dev_context_ptrs[slot_id],
@@ -282,6 +332,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx	*ep0_ctx;
	struct usb_device	*top_dev;
	struct xhci_slot_ctx    *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;

	dev = xhci->devs[udev->slot_id];
	/* Slot ID 0 is reserved */
@@ -290,27 +342,29 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
				udev->slot_id);
		return -EINVAL;
	}
	ep0_ctx = &dev->in_ctx->ep[0];
	ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);

	/* 2) New slot context and endpoint 0 context are valid*/
	dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;

	/* 3) Only the control endpoint is valid - one endpoint context */
	dev->in_ctx->slot.dev_info |= LAST_CTX(1);
	slot_ctx->dev_info |= LAST_CTX(1);

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		dev->in_ctx->slot.dev_info |= (u32) udev->route;
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS;
		slot_ctx->dev_info |= (u32) udev->route;
		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
		break;
	case USB_SPEED_HIGH:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS;
		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
		break;
	case USB_SPEED_FULL:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS;
		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
		break;
	case USB_SPEED_LOW:
		dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS;
		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
		break;
	case USB_SPEED_VARIABLE:
		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -324,7 +378,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
			top_dev = top_dev->parent)
		/* Found device below root hub */;
	dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum);
	xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);

	/* Is this a LS/FS device under a HS hub? */
@@ -334,8 +388,8 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
	 */
	if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
			udev->tt) {
		dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id;
		dev->in_ctx->slot.tt_info |= udev->ttport << 8;
		slot_ctx->tt_info = udev->tt->hub->slot_id;
		slot_ctx->tt_info |= udev->ttport << 8;
	}
	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@@ -466,7 +520,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
	unsigned int max_burst;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	/* Set up the endpoint ring */
	virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
@@ -533,7 +587,7 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
	struct xhci_ep_ctx *ep_ctx;

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = &virt_dev->in_ctx->ep[ep_index];
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);

	ep_ctx->ep_info = 0;
	ep_ctx->ep_info2 = 0;
@@ -753,11 +807,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	 */
	xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
			SEGMENT_SIZE, 64, xhci->page_size);

	/* See Table 46 and Note on Figure 55 */
	/* FIXME support 64-byte contexts */
	xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev,
			sizeof(struct xhci_device_control),
			64, xhci->page_size);
			2112, 64, xhci->page_size);
	if (!xhci->segment_pool || !xhci->device_pool)
		goto fail;

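The layout behind these helpers is simple: each sub-context occupies one CTX_SIZE slot (32 or 64 bytes), a Device container starts with the Slot context, and an Input container prepends one Input Control context, which is why the shared pool block grows to 2112 bytes (a 2048-byte 64-byte-context device context plus one extra 64-byte slot for the largest input context). The following standalone userspace snippet, not part of the patch and with hypothetical helper names, mirrors the arithmetic of xhci_get_slot_ctx()/xhci_get_ep_ctx() above.

#include <assert.h>
#include <stdio.h>

/* Mirror of CTX_SIZE(): 64-byte contexts when the HCC CSZ bit is set. */
static unsigned int ctx_size(int csz)
{
	return csz ? 64 : 32;
}

/* Offset of the Slot context inside a container: 0 for device contexts,
 * one slot further for input contexts, which begin with Input Control. */
static unsigned int slot_ctx_offset(int csz, int is_input)
{
	return is_input ? ctx_size(csz) : 0;
}

/* Offset of endpoint context ep_index, as computed in xhci_get_ep_ctx(). */
static unsigned int ep_ctx_offset(int csz, int is_input, unsigned int ep_index)
{
	unsigned int idx = ep_index + 1 + (is_input ? 1 : 0);
	return idx * ctx_size(csz);
}

int main(void)
{
	/* 64-byte contexts, input container: EP0 context starts 128 bytes in. */
	assert(ep_ctx_offset(1, 1, 0) == 128);
	/* 32-byte contexts, device container: EP0 follows the Slot context. */
	assert(ep_ctx_offset(0, 0, 0) == 32);
	printf("slot@%u ep0@%u (64-byte input context)\n",
	       slot_ctx_offset(1, 1), ep_ctx_offset(1, 1, 0));
	return 0;
}
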
+15 −7
File changed (preview collapsed).

+34 −27
File changed (preview collapsed).