
Commit 28ccd296 authored by Matt Evans, committed by Sarah Sharp

xhci: Make xHCI driver endian-safe



This patch changes the struct members defining access to xHCI device-visible
memory to use __le32/__le64 where appropriate, and then adds swaps where
required.  Checked with sparse that all accesses are correct.

MMIO accesses use readl/writel, so they are already performed little-endian, but the
prototypes now reflect this with __le*.

There were a couple of (debug) instances of DMA pointers being truncated to
32 bits; these have been fixed too.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
parent 7fc2a616
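The conversions in the diffs below all follow the standard kernel pattern for endian-safe access to device-visible structures: fields the controller reads or writes are declared __le32/__le64, values are swapped with le32_to_cpu()/le64_to_cpu() on the way in and cpu_to_le32()/cpu_to_le64() on the way out, and sparse can then flag unswapped accesses. A minimal sketch of that pattern, using a hypothetical descriptor rather than the real xHCI structs:

#include <linux/types.h>	/* __le32, __le64, u32, u64 */
#include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu(), ... */

/* Hypothetical descriptor living in device-visible, little-endian memory. */
struct demo_trb {
	__le64	buffer;		/* DMA address, stored little-endian */
	__le32	status;
	__le32	control;
};

/* Reads swap from LE to CPU byte order before the value is used. */
static u32 demo_trb_type(const struct demo_trb *trb)
{
	return le32_to_cpu(trb->control) & 0x3f;	/* illustrative type mask */
}

/* Writes are built in CPU byte order and swapped once when stored. */
static void demo_trb_set(struct demo_trb *trb, u64 dma_addr, u32 flags)
{
	trb->buffer  = cpu_to_le64(dma_addr);
	trb->control = cpu_to_le32(flags);
}

On little-endian machines the swaps compile away; on big-endian machines (the case this patch fixes) they become byte swaps, so the same driver code works on both.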
+26 −25
@@ -147,7 +147,7 @@ static void xhci_print_op_regs(struct xhci_hcd *xhci)
 
 static void xhci_print_ports(struct xhci_hcd *xhci)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	int i, j;
 	int ports;
 	char *names[NUM_PORT_REGS] = {
@@ -253,27 +253,27 @@ void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 {
 	u64	address;
-	u32	type = xhci_readl(xhci, &trb->link.control) & TRB_TYPE_BITMASK;
+	u32	type = le32_to_cpu(trb->link.control) & TRB_TYPE_BITMASK;
 
 	switch (type) {
 	case TRB_TYPE(TRB_LINK):
 		xhci_dbg(xhci, "Link TRB:\n");
 		xhci_print_trb_offsets(xhci, trb);
 
-		address = trb->link.segment_ptr;
+		address = le64_to_cpu(trb->link.segment_ptr);
 		xhci_dbg(xhci, "Next ring segment DMA address = 0x%llx\n", address);
 
 		xhci_dbg(xhci, "Interrupter target = 0x%x\n",
-				GET_INTR_TARGET(trb->link.intr_target));
+			 GET_INTR_TARGET(le32_to_cpu(trb->link.intr_target)));
 		xhci_dbg(xhci, "Cycle bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_CYCLE));
+			 (unsigned int) (le32_to_cpu(trb->link.control) & TRB_CYCLE));
 		xhci_dbg(xhci, "Toggle cycle bit = %u\n",
-				(unsigned int) (trb->link.control & LINK_TOGGLE));
+			 (unsigned int) (le32_to_cpu(trb->link.control) & LINK_TOGGLE));
 		xhci_dbg(xhci, "No Snoop bit = %u\n",
-				(unsigned int) (trb->link.control & TRB_NO_SNOOP));
+			 (unsigned int) (le32_to_cpu(trb->link.control) & TRB_NO_SNOOP));
 		break;
 	case TRB_TYPE(TRB_TRANSFER):
-		address = trb->trans_event.buffer;
+		address = le64_to_cpu(trb->trans_event.buffer);
 		/*
 		 * FIXME: look at flags to figure out if it's an address or if
 		 * the data is directly in the buffer field.
@@ -281,11 +281,12 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 		xhci_dbg(xhci, "DMA address or buffer contents= %llu\n", address);
 		break;
 	case TRB_TYPE(TRB_COMPLETION):
-		address = trb->event_cmd.cmd_trb;
+		address = le64_to_cpu(trb->event_cmd.cmd_trb);
 		xhci_dbg(xhci, "Command TRB pointer = %llu\n", address);
 		xhci_dbg(xhci, "Completion status = %u\n",
-				(unsigned int) GET_COMP_CODE(trb->event_cmd.status));
-		xhci_dbg(xhci, "Flags = 0x%x\n", (unsigned int) trb->event_cmd.flags);
+			 (unsigned int) GET_COMP_CODE(le32_to_cpu(trb->event_cmd.status)));
+		xhci_dbg(xhci, "Flags = 0x%x\n",
+			 (unsigned int) le32_to_cpu(trb->event_cmd.flags));
 		break;
 	default:
 		xhci_dbg(xhci, "Unknown TRB with TRB type ID %u\n",
@@ -311,16 +312,16 @@ void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb)
 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg)
 {
 	int i;
-	u32 addr = (u32) seg->dma;
+	u64 addr = seg->dma;
 	union xhci_trb *trb = seg->trbs;
 
 	for (i = 0; i < TRBS_PER_SEGMENT; ++i) {
 		trb = &seg->trbs[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n", addr,
-				lower_32_bits(trb->link.segment_ptr),
-				upper_32_bits(trb->link.segment_ptr),
-				(unsigned int) trb->link.intr_target,
-				(unsigned int) trb->link.control);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n", addr,
+			 (u32)lower_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+			 (u32)upper_32_bits(le64_to_cpu(trb->link.segment_ptr)),
+			 (unsigned int) le32_to_cpu(trb->link.intr_target),
+			 (unsigned int) le32_to_cpu(trb->link.control));
 		addr += sizeof(*trb);
 	}
 }
@@ -391,18 +392,18 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
 
 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
 {
-	u32 addr = (u32) erst->erst_dma_addr;
+	u64 addr = erst->erst_dma_addr;
 	int i;
 	struct xhci_erst_entry *entry;
 
 	for (i = 0; i < erst->num_entries; ++i) {
 		entry = &erst->entries[i];
-		xhci_dbg(xhci, "@%08x %08x %08x %08x %08x\n",
-				(unsigned int) addr,
-				lower_32_bits(entry->seg_addr),
-				upper_32_bits(entry->seg_addr),
-				(unsigned int) entry->seg_size,
-				(unsigned int) entry->rsvd);
+		xhci_dbg(xhci, "@%016llx %08x %08x %08x %08x\n",
+			 addr,
+			 lower_32_bits(le64_to_cpu(entry->seg_addr)),
+			 upper_32_bits(le64_to_cpu(entry->seg_addr)),
+			 (unsigned int) le32_to_cpu(entry->seg_size),
+			 (unsigned int) le32_to_cpu(entry->rsvd));
 		addr += sizeof(*entry);
 	}
 }
@@ -436,7 +437,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
 {
 	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
 
-	switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+	switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
 	case 0:
 		return "enabled/disabled";
 	case 1:
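The hub and port-array changes that follow mostly retype MMIO pointers from u32 __iomem * to __le32 __iomem *. As the commit message notes, readl()/writel() already perform the little-endian conversion, so only the pointer annotation changes; a rough sketch of the idea (the demo_* wrapper names are illustrative, not the driver's actual helpers):

#include <linux/io.h>		/* readl(), writel() */
#include <linux/types.h>	/* __le32, u32 */

/*
 * The register contents are little-endian, so the pointer is tagged
 * __le32 __iomem * for sparse; readl()/writel() do the LE <-> CPU
 * conversion internally, so C code only ever sees CPU-order values.
 */
static inline u32 demo_port_read(__le32 __iomem *reg)
{
	return readl(reg);
}

static inline void demo_port_write(u32 val, __le32 __iomem *reg)
{
	writel(val, reg);
}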
+9 −9
@@ -50,7 +50,7 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
 	temp |= 0x0008;
 	/* Bits 6:5 - no TTs in root ports */
 	/* Bit  7 - no port indicators */
-	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(temp);
+	desc->wHubCharacteristics = cpu_to_le16(temp);
 }
 
 /* Fill in the USB 2.0 roothub descriptor */
@@ -314,7 +314,7 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
 }
 
 static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	/* Don't allow the USB core to disable SuperSpeed ports. */
 	if (hcd->speed == HCD_USB3) {
@@ -331,7 +331,7 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
 }
 
 static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
-		u16 wIndex, u32 __iomem *addr, u32 port_status)
+		u16 wIndex, __le32 __iomem *addr, u32 port_status)
 {
 	char *port_change_bit;
 	u32 status;
@@ -376,7 +376,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 	unsigned long flags;
 	u32 temp, temp1, status;
 	int retval = 0;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	int slot_id;
 	struct xhci_bus_state *bus_state;
 
@@ -664,7 +664,7 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	int i, retval;
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	int ports;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 
 	if (hcd->speed == HCD_USB3) {
@@ -709,7 +709,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	unsigned long flags;
 
@@ -779,7 +779,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 
 		if (DEV_HIGHSPEED(t1)) {
 			/* enable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
@@ -801,7 +801,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 {
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	int max_ports, port_index;
-	u32 __iomem **port_array;
+	__le32 __iomem **port_array;
 	struct xhci_bus_state *bus_state;
 	u32 temp;
 	unsigned long flags;
@@ -875,7 +875,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 
 		if (DEV_HIGHSPEED(temp)) {
 			/* disable remote wake up for USB 2.0 */
-			u32 __iomem *addr;
+			__le32 __iomem *addr;
 			u32 tmp;
 
 			/* Add one to the port status register address to get
+61 −61
@@ -89,16 +89,17 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 		return;
 	prev->next = next;
 	if (link_trbs) {
-		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			segment_ptr = cpu_to_le64(next->dma);
 
 		/* Set the last TRB in the segment to have a TRB type ID of Link TRB */
-		val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
+		val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 		val &= ~TRB_TYPE_BITMASK;
 		val |= TRB_TYPE(TRB_LINK);
 		/* Always set the chain bit with 0.95 hardware */
 		if (xhci_link_trb_quirk(xhci))
 			val |= TRB_CHAIN;
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
+		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 	}
 	xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
 			(unsigned long long)prev->dma,
@@ -186,7 +187,8 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 
 	if (link_trbs) {
 		/* See section 4.9.2.1 and 6.4.4.1 */
-		prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE);
+		prev->trbs[TRBS_PER_SEGMENT-1].link.
+			control |= cpu_to_le32(LINK_TOGGLE);
 		xhci_dbg(xhci, "Wrote link toggle flag to"
 				" segment %p (virtual), 0x%llx (DMA)\n",
 				prev, (unsigned long long)prev->dma);
@@ -548,7 +550,8 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		addr = cur_ring->first_seg->dma |
 			SCT_FOR_CTX(SCT_PRI_TR) |
 			cur_ring->cycle_state;
-		stream_info->stream_ctx_array[cur_stream].stream_ring = addr;
+		stream_info->stream_ctx_array[cur_stream].
+			stream_ring = cpu_to_le64(addr);
 		xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n",
 				cur_stream, (unsigned long long) addr);
 
@@ -614,10 +617,10 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
 	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
 	xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
 			1 << (max_primary_streams + 1));
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info |= EP_MAXPSTREAMS(max_primary_streams);
-	ep_ctx->ep_info |= EP_HAS_LSA;
-	ep_ctx->deq  = stream_info->ctx_array_dma;
+	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+				       | EP_HAS_LSA);
+	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
 }
 
 /*
@@ -630,10 +633,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
 		struct xhci_virt_ep *ep)
 {
 	dma_addr_t addr;
-	ep_ctx->ep_info &= ~EP_MAXPSTREAMS_MASK;
-	ep_ctx->ep_info &= ~EP_HAS_LSA;
+	ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA));
 	addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue);
-	ep_ctx->deq  = addr | ep->ring->cycle_state;
+	ep_ctx->deq  = cpu_to_le64(addr | ep->ring->cycle_state);
 }
 
 /* Frees all stream contexts associated with the endpoint,
@@ -781,11 +783,11 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
-	xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
+	xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma);
 	xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n",
 		 slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
-			(unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]);
+		 (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
 
 	return 1;
 fail:
@@ -810,8 +812,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 	 * configured device has reset, so all control transfers should have
 	 * been completed or cancelled before the reset.
 	 */
-	ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
-	ep0_ctx->deq |= ep_ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
+							ep_ring->enqueue)
+				   | ep_ring->cycle_state);
 }
 
 /*
@@ -885,24 +888,22 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx);
 
 	/* 2) New slot context and endpoint 0 context are valid*/
-	ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG;
+	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
 
 	/* 3) Only the control endpoint is valid - one endpoint context */
-	slot_ctx->dev_info |= LAST_CTX(1);
-
-	slot_ctx->dev_info |= (u32) udev->route;
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | (u32) udev->route);
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_SS);
 		break;
 	case USB_SPEED_HIGH:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_HS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_HS);
 		break;
 	case USB_SPEED_FULL:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_FS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_FS);
 		break;
 	case USB_SPEED_LOW:
-		slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
+		slot_ctx->dev_info |= cpu_to_le32((u32) SLOT_SPEED_LS);
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -916,7 +917,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 	port_num = xhci_find_real_port_number(xhci, udev);
 	if (!port_num)
 		return -EINVAL;
-	slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(port_num);
+	slot_ctx->dev_info2 |= cpu_to_le32((u32) ROOT_HUB_PORT(port_num));
 	/* Set the port number in the virtual_device to the faked port number */
 	for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
 			top_dev = top_dev->parent)
@@ -927,31 +928,31 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 
 	/* Is this a LS/FS device under an external HS hub? */
 	if (udev->tt && udev->tt->hub->parent) {
-		slot_ctx->tt_info = udev->tt->hub->slot_id;
-		slot_ctx->tt_info |= udev->ttport << 8;
+		slot_ctx->tt_info = cpu_to_le32(udev->tt->hub->slot_id |
+						(udev->ttport << 8));
 		if (udev->tt->multi)
-			slot_ctx->dev_info |= DEV_MTT;
+			slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	}
 	xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
 	xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
 
 	/* Step 4 - ring already allocated */
 	/* Step 5 */
-	ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
+	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
 	/*
 	 * XXX: Not sure about wireless USB devices.
 	 */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		ep0_ctx->ep_info2 |= MAX_PACKET(512);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(512));
 		break;
 	case USB_SPEED_HIGH:
 	/* USB core guesses at a 64-byte max packet first for FS devices */
 	case USB_SPEED_FULL:
-		ep0_ctx->ep_info2 |= MAX_PACKET(64);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(64));
 		break;
 	case USB_SPEED_LOW:
-		ep0_ctx->ep_info2 |= MAX_PACKET(8);
+		ep0_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(8));
 		break;
 	case USB_SPEED_WIRELESS:
 		xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
@@ -962,12 +963,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
 		BUG();
 	}
 	/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
-	ep0_ctx->ep_info2 |= MAX_BURST(0);
-	ep0_ctx->ep_info2 |= ERROR_COUNT(3);
+	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3));
 
-	ep0_ctx->deq =
-		dev->eps[0].ring->first_seg->dma;
-	ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
+	ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma |
+				   dev->eps[0].ring->cycle_state);
 
 	/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
 
@@ -1133,8 +1132,8 @@ static u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	if (udev->speed == USB_SPEED_SUPER)
 		return ep->ss_ep_comp.wBytesPerInterval;
 
-	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+	max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+	max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize) & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
 }
@@ -1183,10 +1182,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	}
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;
-	ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
+	ep_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma | ep_ring->cycle_state);
 
-	ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
-	ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
+	ep_ctx->ep_info = cpu_to_le32(xhci_get_endpoint_interval(udev, ep)
+				      | EP_MULT(xhci_get_endpoint_mult(udev, ep)));
 
 	/* FIXME dig Mult and streams info out of ep companion desc */
 
@@ -1194,22 +1193,22 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * error count = 0 means infinite retries.
 	 */
 	if (!usb_endpoint_xfer_isoc(&ep->desc))
-		ep_ctx->ep_info2 = ERROR_COUNT(3);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(3));
 	else
-		ep_ctx->ep_info2 = ERROR_COUNT(1);
+		ep_ctx->ep_info2 = cpu_to_le32(ERROR_COUNT(1));
 
-	ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep);
+	ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
 
 	/* Set the max packet size and max burst */
 	switch (udev->speed) {
 	case USB_SPEED_SUPER:
-		max_packet = ep->desc.wMaxPacketSize;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = le16_to_cpu(ep->desc.wMaxPacketSize);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		/* dig out max burst from ep companion desc */
 		max_packet = ep->ss_ep_comp.bMaxBurst;
 		if (!max_packet)
 			xhci_warn(xhci, "WARN no SS endpoint bMaxBurst\n");
-		ep_ctx->ep_info2 |= MAX_BURST(max_packet);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
 		break;
 	case USB_SPEED_HIGH:
 		/* bits 11:12 specify the number of additional transaction
@@ -1217,20 +1216,21 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		 */
 		if (usb_endpoint_xfer_isoc(&ep->desc) ||
 				usb_endpoint_xfer_int(&ep->desc)) {
-			max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
-			ep_ctx->ep_info2 |= MAX_BURST(max_burst);
+			max_burst = (le16_to_cpu(ep->desc.wMaxPacketSize)
+				     & 0x1800) >> 11;
+			ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
 		}
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
+		max_packet = GET_MAX_PACKET(le16_to_cpu(ep->desc.wMaxPacketSize));
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
 		break;
 	default:
 		BUG();
 	}
 	max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
-	ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
 
 	/*
 	 * XXX no idea how to calculate the average TRB buffer length for bulk
@@ -1247,7 +1247,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	 * use Event Data TRBs, and we don't chain in a link TRB on short
 	 * transfers, we're basically dividing by 1.
 	 */
-	ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+	ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(max_esit_payload));
 
 	/* FIXME Debug endpoint context */
 	return 0;
@@ -1347,7 +1347,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
 	if (!xhci->scratchpad->sp_dma_buffers)
 		goto fail_sp4;
 
-	xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma;
+	xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
 	for (i = 0; i < num_sp; i++) {
 		dma_addr_t dma;
 		void *buf = pci_alloc_consistent(to_pci_dev(dev),
@@ -1724,7 +1724,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 }
 
 static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
-		u32 __iomem *addr, u8 major_revision)
+		__le32 __iomem *addr, u8 major_revision)
 {
 	u32 temp, port_offset, port_count;
 	int i;
@@ -1789,7 +1789,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
 */
static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
 {
-	u32 __iomem *addr;
+	__le32 __iomem *addr;
 	u32 offset;
 	unsigned int num_ports;
 	int i, port_index;
@@ -2042,8 +2042,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	/* set ring base address and size for each segment table entry */
 	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
 		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
-		entry->seg_addr = seg->dma;
-		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->seg_addr = cpu_to_le64(seg->dma);
+		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
 		entry->rsvd = 0;
 		seg = seg->next;
 	}
+140 −127

File changed; preview size limit exceeded, so the diff for this file is collapsed.

+57 −52
@@ -973,8 +973,8 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 
 	out_ctx = xhci->devs[slot_id]->out_ctx;
 	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
-	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
-	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
+	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+	max_packet_size = le16_to_cpu(urb->dev->ep0.desc.wMaxPacketSize);
 	if (hw_max_packet_size != max_packet_size) {
 		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
 		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
@@ -988,15 +988,15 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 				xhci->devs[slot_id]->out_ctx, ep_index);
 		in_ctx = xhci->devs[slot_id]->in_ctx;
 		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
-		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
-		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
+		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
+		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
 		/* Set up the input context flags for the command */
 		/* FIXME: This won't work if a non-default control endpoint
 		 * changes max packet sizes.
 		 */
 		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-		ctrl_ctx->add_flags = EP0_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
 		ctrl_ctx->drop_flags = 0;
 
 		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
@@ -1010,7 +1010,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		/* Clean up the input context for later use by bandwidth
 		 * functions.
 		 */
-		ctrl_ctx->add_flags = SLOT_FLAG;
+		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
 	}
 	return ret;
 }
@@ -1331,27 +1331,30 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	/* If the HC already knows the endpoint is disabled,
 	 * or the HCD has noted it is disabled, ignore this request
 	 */
-	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
-			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+	    EP_STATE_DISABLED ||
+	    le32_to_cpu(ctrl_ctx->drop_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
 				__func__, ep);
 		return 0;
 	}
 
-	ctrl_ctx->drop_flags |= drop_flag;
-	new_drop_flags = ctrl_ctx->drop_flags;
+	ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
 
-	ctrl_ctx->add_flags &= ~drop_flag;
-	new_add_flags = ctrl_ctx->add_flags;
+	ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
 
-	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
+	last_ctx = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags));
 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we deleted the last one */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) >
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
 	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
 
 	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
 
@@ -1419,7 +1422,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	/* If the HCD has already noted the endpoint is enabled,
 	 * ignore this request.
 	 */
-	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
+	if (le32_to_cpu(ctrl_ctx->add_flags) &
+	    xhci_get_endpoint_flag(&ep->desc)) {
 		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
 				__func__, ep);
 		return 0;
@@ -1437,8 +1441,8 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 		return -ENOMEM;
 	}
 
-	ctrl_ctx->add_flags |= added_ctxs;
-	new_add_flags = ctrl_ctx->add_flags;
+	ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
+	new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
 
 	/* If xhci_endpoint_disable() was called for this endpoint, but the
 	 * xHC hasn't been notified yet through the check_bandwidth() call,
@@ -1446,15 +1450,16 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	 * descriptors.  We must drop and re-add this endpoint, so we leave the
 	 * drop flags alone.
 	 */
-	new_drop_flags = ctrl_ctx->drop_flags;
+	new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
 
 	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
 	/* Update the last valid endpoint context, if we just added one past */
-	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
-		slot_ctx->dev_info &= ~LAST_CTX_MASK;
-		slot_ctx->dev_info |= LAST_CTX(last_ctx);
+	if ((le32_to_cpu(slot_ctx->dev_info) & LAST_CTX_MASK) <
+	    LAST_CTX(last_ctx)) {
+		slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+		slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(last_ctx));
 	}
-	new_slot_info = slot_ctx->dev_info;
+	new_slot_info = le32_to_cpu(slot_ctx->dev_info);
 
 	/* Store the usb_device pointer for later use */
 	ep->hcpriv = udev;
@@ -1484,9 +1489,9 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
 	ctrl_ctx->drop_flags = 0;
 	ctrl_ctx->add_flags = 0;
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
-	slot_ctx->dev_info &= ~LAST_CTX_MASK;
+	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
 	/* Endpoint 0 is always valid */
-	slot_ctx->dev_info |= LAST_CTX(1);
+	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
 	for (i = 1; i < 31; ++i) {
 		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
 		ep_ctx->ep_info = 0;
@@ -1581,7 +1586,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 	unsigned long flags;
 	struct xhci_container_ctx *in_ctx;
 	struct completion *cmd_completion;
-	int *cmd_status;
+	u32 *cmd_status;
 	struct xhci_virt_device *virt_dev;
 
 	spin_lock_irqsave(&xhci->lock, flags);
@@ -1595,8 +1600,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		/* Enqueue pointer can be left pointing to the link TRB,
 		 * we must handle that
 		 */
-		if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
-				== TRB_TYPE(TRB_LINK))
+		if ((le32_to_cpu(command->command_trb->link.control)
+		     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
 			command->command_trb =
 				xhci->cmd_ring->enq_seg->next->trbs;
 
@@ -1672,14 +1677,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
 	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
-	ctrl_ctx->add_flags &= ~EP0_FLAG;
-	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
-	ctrl_ctx->drop_flags &= ~EP0_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
 	xhci_dbg(xhci, "New Input Control Context:\n");
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
 	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
 
 	ret = xhci_configure_endpoint(xhci, udev, NULL,
 			false, false);
@@ -1690,7 +1694,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 
 	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
-			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
+		     LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info)));
 
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/* Install new rings and free or cache any old rings */
@@ -1740,10 +1744,10 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
 {
 	struct xhci_input_control_ctx *ctrl_ctx;
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
-	ctrl_ctx->add_flags = add_flags;
-	ctrl_ctx->drop_flags = drop_flags;
+	ctrl_ctx->add_flags = cpu_to_le32(add_flags);
+	ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
 	xhci_slot_copy(xhci, in_ctx, out_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 
 	xhci_dbg(xhci, "Input Context:\n");
 	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
@@ -1772,7 +1776,7 @@ static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
 				deq_state->new_deq_ptr);
 		return;
 	}
-	ep_ctx->deq = addr | deq_state->new_cycle_state;
+	ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
 
 	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
 	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
@@ -2327,8 +2331,8 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Enqueue pointer can be left pointing to the link TRB,
 	 * we must handle that
 	 */
-	if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
-			== TRB_TYPE(TRB_LINK))
+	if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
+	     & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
 		reset_device_cmd->command_trb =
 			xhci->cmd_ring->enq_seg->next->trbs;
 
@@ -2612,7 +2616,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 		 udev->slot_id,
 		 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
 		 (unsigned long long)
-				xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
+		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
 	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
 			(unsigned long long)virt_dev->out_ctx->dma);
 	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
@@ -2626,7 +2630,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
 	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
 	/* Use kernel assigned address for devices; store xHC assigned
 	 * address locally. */
-	virt_dev->address = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
+	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
+		+ 1;
 	/* Zero the input context control for later use */
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
 	ctrl_ctx->add_flags = 0;
@@ -2670,16 +2675,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 	spin_lock_irqsave(&xhci->lock, flags);
 	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
 	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
-	ctrl_ctx->add_flags |= SLOT_FLAG;
+	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
-	slot_ctx->dev_info |= DEV_HUB;
+	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
 	if (tt->multi)
-		slot_ctx->dev_info |= DEV_MTT;
+		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
 	if (xhci->hci_version > 0x95) {
 		xhci_dbg(xhci, "xHCI version %x needs hub "
 				"TT think time and number of ports\n",
 				(unsigned int) xhci->hci_version);
-		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
+		slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
 		/* Set TT think time - convert from ns to FS bit times.
 		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
 		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
@@ -2687,7 +2692,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
 		think_time = tt->think_time;
 		if (think_time != 0)
 			think_time = (think_time / 666) - 1;
-		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
+		slot_ctx->tt_info |= cpu_to_le32(TT_THINK_TIME(think_time));
 	} else {
 		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
 				"TT think time or number of ports\n",