Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 90221170 authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Merge tag 'for-usb-next-2012-03-13' of...

Merge tag 'for-usb-next-2012-03-13' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

Hi Greg,

Here's my final pull request for 3.4.  All the patches have been under
review for some time (months in some cases).  The ring expansion patches
in particular have been tested by both me and Paul Zimmerman from
Synopsys.

They add support for:
 - Dynamic ring expansion
 - New USB 2.1 link PM errata (BESL)
 - xHCI host controller support for the Synopsys DesignWare 3 IP

The dynamic ring expansion patches finally make test 10 of the host-side
test pass, instead of failing due to no room on the endpoint ring for
the larger transfers.  I would have hoped that the ring expansion
patchset would make the Point Grey USB 3.0 camera work, but sadly it
fails to respond to a control transfer on my test system.  This doesn't
seem to be a driver bug, but it could be a device or host bug.

Felipe has tested the patches to add a platform device to the xHCI
driver on the Synopsys DesignWare 3 IP in the TI OMAP5 board.

Please pull.

Thanks,
Sarah Sharp
parents f7a0d426 3429e91a
Loading
Loading
Loading
Loading
+4 −0
Original line number Original line Diff line number Diff line
@@ -27,6 +27,10 @@ config USB_XHCI_HCD
	  To compile this driver as a module, choose M here: the
	  To compile this driver as a module, choose M here: the
	  module will be called xhci-hcd.
	  module will be called xhci-hcd.


config USB_XHCI_PLATFORM
	tristate
	depends on USB_XHCI_HCD

config USB_XHCI_HCD_DEBUGGING
config USB_XHCI_HCD_DEBUGGING
	bool "Debugging for the xHCI host controller"
	bool "Debugging for the xHCI host controller"
	depends on USB_XHCI_HCD
	depends on USB_XHCI_HCD
+4 −0
Original line number Original line Diff line number Diff line
@@ -15,6 +15,10 @@ xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
xhci-hcd-$(CONFIG_PCI)	+= xhci-pci.o
xhci-hcd-$(CONFIG_PCI)	+= xhci-pci.o


ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
	xhci-hcd-y		+= xhci-plat.o
endif

obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
obj-$(CONFIG_USB_WHCI_HCD)	+= whci/


obj-$(CONFIG_PCI)		+= pci-quirks.o
obj-$(CONFIG_PCI)		+= pci-quirks.o
+168 −62
Original line number Original line Diff line number Diff line
@@ -34,10 +34,12 @@
 * Section 4.11.1.1:
 * Section 4.11.1.1:
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 * "All components of all Command and Transfer TRBs shall be initialized to '0'"
 */
 */
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags)
static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
					unsigned int cycle_state, gfp_t flags)
{
{
	struct xhci_segment *seg;
	struct xhci_segment *seg;
	dma_addr_t	dma;
	dma_addr_t	dma;
	int		i;


	seg = kzalloc(sizeof *seg, flags);
	seg = kzalloc(sizeof *seg, flags);
	if (!seg)
	if (!seg)
@@ -50,6 +52,11 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
	}
	}


	memset(seg->trbs, 0, SEGMENT_SIZE);
	memset(seg->trbs, 0, SEGMENT_SIZE);
	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
	if (cycle_state == 0) {
		for (i = 0; i < TRBS_PER_SEGMENT; i++)
			seg->trbs[i].link.control |= TRB_CYCLE;
	}
	seg->dma = dma;
	seg->dma = dma;
	seg->next = NULL;
	seg->next = NULL;


@@ -65,6 +72,20 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
	kfree(seg);
	kfree(seg);
}
}


static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
				struct xhci_segment *first)
{
	struct xhci_segment *seg;

	seg = first->next;
	while (seg != first) {
		struct xhci_segment *next = seg->next;
		xhci_segment_free(xhci, seg);
		seg = next;
	}
	xhci_segment_free(xhci, first);
}

/*
/*
 * Make the prev segment point to the next segment.
 * Make the prev segment point to the next segment.
 *
 *
@@ -73,14 +94,14 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
 */
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		struct xhci_segment *next, bool link_trbs, bool isoc)
		struct xhci_segment *next, enum xhci_ring_type type)
{
{
	u32 val;
	u32 val;


	if (!prev || !next)
	if (!prev || !next)
		return;
		return;
	prev->next = next;
	prev->next = next;
	if (link_trbs) {
	if (type != TYPE_EVENT) {
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
		prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr =
			cpu_to_le64(next->dma);
			cpu_to_le64(next->dma);


@@ -91,35 +112,55 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
		/* Always set the chain bit with 0.95 hardware */
		/* Always set the chain bit with 0.95 hardware */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		/* Set chain bit for isoc rings on AMD 0.96 host */
		if (xhci_link_trb_quirk(xhci) ||
		if (xhci_link_trb_quirk(xhci) ||
				(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST)))
				(type == TYPE_ISOC &&
				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
			val |= TRB_CHAIN;
			val |= TRB_CHAIN;
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
		prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
	}
	}
}
}


/* XXX: Do we need the hcd structure in all these functions? */
/*
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
 * Link the ring to the new segments.
 * Set Toggle Cycle for the new ring if needed.
 */
static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *first, struct xhci_segment *last,
		unsigned int num_segs)
{
{
	struct xhci_segment *seg;
	struct xhci_segment *next;
	struct xhci_segment *first_seg;


	if (!ring)
	if (!ring || !first || !last)
		return;
		return;
	if (ring->first_seg) {

		first_seg = ring->first_seg;
	next = ring->enq_seg->next;
		seg = first_seg->next;
	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
		while (seg != first_seg) {
	xhci_link_segments(xhci, last, next, ring->type);
			struct xhci_segment *next = seg->next;
	ring->num_segs += num_segs;
			xhci_segment_free(xhci, seg);
	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
			seg = next;

	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
		ring->last_seg->trbs[TRBS_PER_SEGMENT-1].link.control
			&= ~cpu_to_le32(LINK_TOGGLE);
		last->trbs[TRBS_PER_SEGMENT-1].link.control
			|= cpu_to_le32(LINK_TOGGLE);
		ring->last_seg = last;
	}
	}
		xhci_segment_free(xhci, first_seg);
		ring->first_seg = NULL;
}
}

/* XXX: Do we need the hcd structure in all these functions? */
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	if (!ring)
		return;

	if (ring->first_seg)
		xhci_free_segments_for_ring(xhci, ring->first_seg);

	kfree(ring);
	kfree(ring);
}
}


static void xhci_initialize_ring_info(struct xhci_ring *ring)
static void xhci_initialize_ring_info(struct xhci_ring *ring,
					unsigned int cycle_state)
{
{
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	/* The ring is empty, so the enqueue pointer == dequeue pointer */
	ring->enqueue = ring->first_seg->trbs;
	ring->enqueue = ring->first_seg->trbs;
@@ -129,11 +170,53 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	/* The ring is initialized to 0. The producer must write 1 to the cycle
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * bit to handover ownership of the TRB, so PCS = 1.  The consumer must
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
	 *
	 * New rings are initialized with cycle state equal to 1; if we are
	 * handling ring expansion, set the cycle state equal to the old ring.
	 */
	 */
	ring->cycle_state = 1;
	ring->cycle_state = cycle_state;
	/* Not necessary for new rings, but needed for re-initialized rings */
	/* Not necessary for new rings, but needed for re-initialized rings */
	ring->enq_updates = 0;
	ring->enq_updates = 0;
	ring->deq_updates = 0;
	ring->deq_updates = 0;

	/*
	 * Each segment has a link TRB, and leave an extra TRB for SW
	 * accounting purpose
	 */
	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
		struct xhci_segment **first, struct xhci_segment **last,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_segment *prev;

	prev = xhci_segment_alloc(xhci, cycle_state, flags);
	if (!prev)
		return -ENOMEM;
	num_segs--;

	*first = prev;
	while (num_segs > 0) {
		struct xhci_segment	*next;

		next = xhci_segment_alloc(xhci, cycle_state, flags);
		if (!next) {
			xhci_free_segments_for_ring(xhci, *first);
			return -ENOMEM;
		}
		xhci_link_segments(xhci, prev, next, type);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, *first, type);
	*last = prev;

	return 0;
}
}


/**
/**
@@ -144,44 +227,34 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring)
 * See section 4.9.1 and figures 15 and 16.
 * See section 4.9.1 and figures 15 and 16.
 */
 */
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, bool link_trbs, bool isoc, gfp_t flags)
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, gfp_t flags)
{
{
	struct xhci_ring	*ring;
	struct xhci_ring	*ring;
	struct xhci_segment	*prev;
	int ret;


	ring = kzalloc(sizeof *(ring), flags);
	ring = kzalloc(sizeof *(ring), flags);
	if (!ring)
	if (!ring)
		return NULL;
		return NULL;


	ring->num_segs = num_segs;
	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->td_list);
	ring->type = type;
	if (num_segs == 0)
	if (num_segs == 0)
		return ring;
		return ring;


	ring->first_seg = xhci_segment_alloc(xhci, flags);
	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
	if (!ring->first_seg)
			&ring->last_seg, num_segs, cycle_state, type, flags);
	if (ret)
		goto fail;
		goto fail;
	num_segs--;

	prev = ring->first_seg;
	while (num_segs > 0) {
		struct xhci_segment	*next;


		next = xhci_segment_alloc(xhci, flags);
	/* Only event ring does not use link TRB */
		if (!next)
	if (type != TYPE_EVENT) {
			goto fail;
		xhci_link_segments(xhci, prev, next, link_trbs, isoc);

		prev = next;
		num_segs--;
	}
	xhci_link_segments(xhci, prev, ring->first_seg, link_trbs, isoc);

	if (link_trbs) {
		/* See section 4.9.2.1 and 6.4.4.1 */
		/* See section 4.9.2.1 and 6.4.4.1 */
		prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
			cpu_to_le32(LINK_TOGGLE);
			cpu_to_le32(LINK_TOGGLE);
	}
	}
	xhci_initialize_ring_info(ring);
	xhci_initialize_ring_info(ring, cycle_state);
	return ring;
	return ring;


fail:
fail:
@@ -217,23 +290,64 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 * pointers to the beginning of the ring.
 * pointers to the beginning of the ring.
 */
 */
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
		struct xhci_ring *ring, bool isoc)
			struct xhci_ring *ring, unsigned int cycle_state,
			enum xhci_ring_type type)
{
{
	struct xhci_segment	*seg = ring->first_seg;
	struct xhci_segment	*seg = ring->first_seg;
	int i;

	do {
	do {
		memset(seg->trbs, 0,
		memset(seg->trbs, 0,
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
		if (cycle_state == 0) {
			for (i = 0; i < TRBS_PER_SEGMENT; i++)
				seg->trbs[i].link.control |= TRB_CYCLE;
		}
		/* All endpoint rings have link TRBs */
		/* All endpoint rings have link TRBs */
		xhci_link_segments(xhci, seg, seg->next, 1, isoc);
		xhci_link_segments(xhci, seg, seg->next, type);
		seg = seg->next;
		seg = seg->next;
	} while (seg != ring->first_seg);
	} while (seg != ring->first_seg);
	xhci_initialize_ring_info(ring);
	ring->type = type;
	xhci_initialize_ring_info(ring, cycle_state);
	/* td list should be empty since all URBs have been cancelled,
	/* td list should be empty since all URBs have been cancelled,
	 * but just in case...
	 * but just in case...
	 */
	 */
	INIT_LIST_HEAD(&ring->td_list);
	INIT_LIST_HEAD(&ring->td_list);
}
}


/*
 * Expand an existing ring.
 * Look for a cached ring or allocate a new ring which has same segment numbers
 * and link the two rings.
 */
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
				unsigned int num_trbs, gfp_t flags)
{
	struct xhci_segment	*first;
	struct xhci_segment	*last;
	unsigned int		num_segs;
	unsigned int		num_segs_needed;
	int			ret;

	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
				(TRBS_PER_SEGMENT - 1);

	/* Allocate number of segments we needed, or double the ring size */
	num_segs = ring->num_segs > num_segs_needed ?
			ring->num_segs : num_segs_needed;

	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
			num_segs, ring->cycle_state, ring->type, flags);
	if (ret)
		return -ENOMEM;

	xhci_link_rings(xhci, ring, first, last, num_segs);
	xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
			ring->num_segs);

	return 0;
}

#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)


static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -528,7 +642,7 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
	 */
	 */
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
		stream_info->stream_rings[cur_stream] =
		stream_info->stream_rings[cur_stream] =
			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
		cur_ring = stream_info->stream_rings[cur_stream];
		cur_ring = stream_info->stream_rings[cur_stream];
		if (!cur_ring)
		if (!cur_ring)
			goto cleanup_rings;
			goto cleanup_rings;
@@ -862,7 +976,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
	}
	}


	/* Allocate endpoint 0 ring */
	/* Allocate endpoint 0 ring */
	dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, false, flags);
	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
	if (!dev->eps[0].ring)
	if (!dev->eps[0].ring)
		goto fail;
		goto fail;


@@ -1300,24 +1414,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
	struct xhci_ring *ep_ring;
	struct xhci_ring *ep_ring;
	unsigned int max_packet;
	unsigned int max_packet;
	unsigned int max_burst;
	unsigned int max_burst;
	enum xhci_ring_type type;
	u32 max_esit_payload;
	u32 max_esit_payload;


	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);


	type = usb_endpoint_type(&ep->desc);
	/* Set up the endpoint ring */
	/* Set up the endpoint ring */
	/*
	 * Isochronous endpoint ring needs bigger size because one isoc URB
	 * carries multiple packets and it will insert multiple tds to the
	 * ring.
	 * This should be replaced with dynamic ring resizing in the future.
	 */
	if (usb_endpoint_xfer_isoc(&ep->desc))
		virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 8, true, true, mem_flags);
	else
	virt_dev->eps[ep_index].new_ring =
	virt_dev->eps[ep_index].new_ring =
			xhci_ring_alloc(xhci, 1, true, false, mem_flags);
		xhci_ring_alloc(xhci, 2, 1, type, mem_flags);
	if (!virt_dev->eps[ep_index].new_ring) {
	if (!virt_dev->eps[ep_index].new_ring) {
		/* Attempt to use the ring cache */
		/* Attempt to use the ring cache */
		if (virt_dev->num_rings_cached == 0)
		if (virt_dev->num_rings_cached == 0)
@@ -1327,7 +1433,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
		virt_dev->num_rings_cached--;
		virt_dev->num_rings_cached--;
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
			usb_endpoint_xfer_isoc(&ep->desc) ? true : false);
					1, type);
	}
	}
	virt_dev->eps[ep_index].skip = false;
	virt_dev->eps[ep_index].skip = false;
	ep_ring = virt_dev->eps[ep_index].new_ring;
	ep_ring = virt_dev->eps[ep_index].new_ring;
@@ -2235,7 +2341,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
		goto fail;
		goto fail;


	/* Set up the command ring to have one segments for now. */
	/* Set up the command ring to have one segments for now. */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags);
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
	if (!xhci->cmd_ring)
	if (!xhci->cmd_ring)
		goto fail;
		goto fail;
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
@@ -2266,7 +2372,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	 * the event ring segment table (ERST).  Section 4.9.3.
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	 */
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci_dbg(xhci, "// Allocating event ring\n");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, false,
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
						flags);
						flags);
	if (!xhci->event_ring)
	if (!xhci->event_ring)
		goto fail;
		goto fail;
+205 −0
Original line number Original line Diff line number Diff line
/*
 * xhci-plat.c - xHCI host controller driver platform Bus Glue.
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com
 * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * A lot of code borrowed from the Linux xHCI driver.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "xhci.h"

/*
 * Quirk callback passed to xhci_gen_setup(): flags this controller as
 * unable to use MSI so the generic code falls back to legacy interrupts.
 */
static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
{
	/*
	 * As of now platform drivers don't provide MSI support so we ensure
	 * here that the generic code does not try to make a pci_dev from our
	 * dev struct in order to setup MSI
	 */
	xhci->quirks |= XHCI_BROKEN_MSI;
}

/* called during probe() after chip reset completes */
static int xhci_plat_setup(struct usb_hcd *hcd)
{
	/* Run the generic xHCI init, applying our platform quirks. */
	return xhci_gen_setup(hcd, xhci_plat_quirks);
}

/*
 * hc_driver ops for the platform-bus xHCI glue.  Every callback is the
 * generic xhci_* implementation except .reset, which routes through
 * xhci_plat_setup() so platform quirks are applied before common setup.
 */
static const struct hc_driver xhci_plat_xhci_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
	.hcd_priv_size =	sizeof(struct xhci_hcd *),

	/*
	 * generic hardware linkage
	 */
	.irq =			xhci_irq,
	.flags =		HCD_MEMORY | HCD_USB3 | HCD_SHARED,

	/*
	 * basic lifecycle operations
	 */
	.reset =		xhci_plat_setup,
	.start =		xhci_run,
	.stop =			xhci_stop,
	.shutdown =		xhci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue =		xhci_urb_enqueue,
	.urb_dequeue =		xhci_urb_dequeue,
	.alloc_dev =		xhci_alloc_dev,
	.free_dev =		xhci_free_dev,
	.alloc_streams =	xhci_alloc_streams,
	.free_streams =		xhci_free_streams,
	.add_endpoint =		xhci_add_endpoint,
	.drop_endpoint =	xhci_drop_endpoint,
	.endpoint_reset =	xhci_endpoint_reset,
	.check_bandwidth =	xhci_check_bandwidth,
	.reset_bandwidth =	xhci_reset_bandwidth,
	.address_device =	xhci_address_device,
	.update_hub_device =	xhci_update_hub_device,
	.reset_device =		xhci_discover_or_reset_device,

	/*
	 * scheduling support
	 */
	.get_frame_number =	xhci_get_frame,

	/* Root hub support */
	.hub_control =		xhci_hub_control,
	.hub_status_data =	xhci_hub_status_data,
	.bus_suspend =		xhci_bus_suspend,
	.bus_resume =		xhci_bus_resume,
};

/*
 * Probe: map the controller's MMIO region, register the primary (USB 2.0)
 * hcd, then create and register the shared USB 3.0 hcd.  Error paths
 * unwind strictly in reverse order of acquisition via the goto labels.
 *
 * Returns 0 on success or a negative errno.
 */
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	/* Both the IRQ and one MMIO region are mandatory resources. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = dev_get_drvdata(&pdev->dev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}

/*
 * Remove: tear down in reverse order of probe — the shared USB 3.0 hcd
 * first, then the primary hcd, its register mapping, and finally the
 * xhci structure itself.
 *
 * NOTE(review): the mem region requested in probe is not explicitly
 * released here — confirm whether usb_remove_hcd()/the core handles it.
 */
static int xhci_plat_remove(struct platform_device *dev)
{
	struct usb_hcd	*hcd = platform_get_drvdata(dev);
	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);

	usb_remove_hcd(xhci->shared_hcd);
	usb_put_hcd(xhci->shared_hcd);

	usb_remove_hcd(hcd);
	iounmap(hcd->regs);
	usb_put_hcd(hcd);
	kfree(xhci);

	return 0;
}

/* Platform bus binding; matches platform devices named "xhci-hcd". */
static struct platform_driver usb_xhci_driver = {
	.probe	= xhci_plat_probe,
	.remove	= xhci_plat_remove,
	.driver	= {
		.name = "xhci-hcd",
	},
};
MODULE_ALIAS("platform:xhci-hcd");

/* Register the platform glue with the driver core; returns 0 or -errno. */
int xhci_register_plat(void)
{
	return platform_driver_register(&usb_xhci_driver);
}

/* Unregister the platform glue; counterpart of xhci_register_plat(). */
void xhci_unregister_plat(void)
{
	platform_driver_unregister(&usb_xhci_driver);
}
+146 −111

File changed.

Preview size limit exceeded, changes collapsed.

Loading