Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3bfb3b63 authored by Linux Build Service Account's avatar Linux Build Service Account Committed by Gerrit - the friendly Code Review server
Browse files

Merge "sound: usb: Add helper APIs to enable audio stream"

parents 5e95e76b a34c8cfc
Loading
Loading
Loading
Loading
+57 −0
Original line number Diff line number Diff line
@@ -2242,8 +2242,65 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
	return hcd->driver->get_frame_number (hcd);
}

int usb_hcd_sec_event_ring_setup(struct usb_device *udev,
	unsigned int intr_num)
{
	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);

	if (!HCD_RH_RUNNING(hcd))
		return 0;

	return hcd->driver->sec_event_ring_setup(hcd, intr_num);
}

/**
 * usb_hcd_sec_event_ring_cleanup - ask the HC driver to tear down a
 * secondary event ring
 * @udev: USB device whose bus's host controller owns the ring
 * @intr_num: index of the secondary interrupter to clean up
 *
 * Return: 0 if the root hub is not running or the op is not implemented,
 * otherwise the HC driver's result.
 */
int usb_hcd_sec_event_ring_cleanup(struct usb_device *udev,
	unsigned int intr_num)
{
	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);

	if (!HCD_RH_RUNNING(hcd))
		return 0;

	/* Optional op: not all HC drivers provide secondary event rings. */
	if (!hcd->driver->sec_event_ring_cleanup)
		return 0;

	return hcd->driver->sec_event_ring_cleanup(hcd, intr_num);
}

/*-------------------------------------------------------------------------*/

/**
 * usb_hcd_get_sec_event_ring_dma_addr - fetch the DMA address of a
 * secondary event ring's first segment
 * @udev: USB device whose bus's host controller owns the ring
 * @intr_num: index of the secondary interrupter
 *
 * Return: the DMA address, or 0 if the root hub is not running or the HC
 * driver does not implement the op (0 is the "not available" sentinel).
 */
dma_addr_t
usb_hcd_get_sec_event_ring_dma_addr(struct usb_device *udev,
	unsigned int intr_num)
{
	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);

	if (!HCD_RH_RUNNING(hcd))
		return 0;

	/* Optional op: only xHCI provides it. */
	if (!hcd->driver->get_sec_event_ring_dma_addr)
		return 0;

	return hcd->driver->get_sec_event_ring_dma_addr(hcd, intr_num);
}

/**
 * usb_hcd_get_dcba_dma_addr - fetch the device context base address (DCBA)
 * DMA address for a device
 * @udev: USB device to look up
 *
 * Return: the DMA address, or 0 if the root hub is not running or the HC
 * driver does not implement the op.
 */
dma_addr_t
usb_hcd_get_dcba_dma_addr(struct usb_device *udev)
{
	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);

	if (!HCD_RH_RUNNING(hcd))
		return 0;

	/* Optional op: only xHCI provides it. */
	if (!hcd->driver->get_dcba_dma_addr)
		return 0;

	return hcd->driver->get_dcba_dma_addr(hcd, udev);
}

/**
 * usb_hcd_get_xfer_ring_dma_addr - fetch the DMA address of an endpoint's
 * transfer ring
 * @udev: USB device owning the endpoint
 * @ep: host-side endpoint whose ring is queried
 *
 * Return: the DMA address, or 0 if the root hub is not running or the HC
 * driver does not implement the op.
 */
dma_addr_t
usb_hcd_get_xfer_ring_dma_addr(struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct usb_hcd	*hcd = bus_to_hcd(udev->bus);

	if (!HCD_RH_RUNNING(hcd))
		return 0;

	/* Optional op: only xHCI provides it. */
	if (!hcd->driver->get_xfer_ring_dma_addr)
		return 0;

	return hcd->driver->get_xfer_ring_dma_addr(hcd, udev, ep);
}

#ifdef	CONFIG_PM

int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
+48 −0
Original line number Diff line number Diff line
@@ -824,6 +824,54 @@ int usb_get_current_frame_number(struct usb_device *dev)
}
EXPORT_SYMBOL_GPL(usb_get_current_frame_number);

/**
 * usb_sec_event_ring_setup - exported wrapper to set up a secondary event
 * ring for an attached device
 * @dev: USB device requesting the ring
 * @intr_num: secondary interrupter index
 *
 * Return: 0 when the device is detached (no-op), else the HCD result.
 */
int usb_sec_event_ring_setup(struct usb_device *dev,
	unsigned int intr_num)
{
	if (dev->state != USB_STATE_NOTATTACHED)
		return usb_hcd_sec_event_ring_setup(dev, intr_num);

	return 0;
}
EXPORT_SYMBOL(usb_sec_event_ring_setup);

/**
 * usb_sec_event_ring_cleanup - exported wrapper to tear down a secondary
 * event ring
 * @dev: USB device that owned the ring
 * @intr_num: secondary interrupter index
 *
 * NOTE(review): unlike the setup path there is deliberately no
 * USB_STATE_NOTATTACHED check here — presumably cleanup must still run
 * after detach; confirm against callers.
 */
int usb_sec_event_ring_cleanup(struct usb_device *dev,
	unsigned int intr_num)
{
	return usb_hcd_sec_event_ring_cleanup(dev, intr_num);
}
EXPORT_SYMBOL(usb_sec_event_ring_cleanup);

/**
 * usb_get_sec_event_ring_dma_addr - exported wrapper returning the DMA
 * address of a secondary event ring
 * @dev: USB device to query
 * @intr_num: secondary interrupter index
 *
 * Return: the DMA address, or 0 if the device is detached.
 */
dma_addr_t
usb_get_sec_event_ring_dma_addr(struct usb_device *dev,
	unsigned int intr_num)
{
	if (dev->state != USB_STATE_NOTATTACHED)
		return usb_hcd_get_sec_event_ring_dma_addr(dev, intr_num);

	return 0;
}
EXPORT_SYMBOL(usb_get_sec_event_ring_dma_addr);

/**
 * usb_get_dcba_dma_addr - exported wrapper returning a device's DCBA DMA
 * address
 * @dev: USB device to query
 *
 * Return: the DMA address, or 0 if the device is detached.
 */
dma_addr_t
usb_get_dcba_dma_addr(struct usb_device *dev)
{
	if (dev->state != USB_STATE_NOTATTACHED)
		return usb_hcd_get_dcba_dma_addr(dev);

	return 0;
}
EXPORT_SYMBOL(usb_get_dcba_dma_addr);

/**
 * usb_get_xfer_ring_dma_addr - exported wrapper returning an endpoint's
 * transfer ring DMA address
 * @dev: USB device owning the endpoint
 * @ep: endpoint whose transfer ring is queried
 *
 * Return: the DMA address, or 0 if the device is detached.
 */
dma_addr_t usb_get_xfer_ring_dma_addr(struct usb_device *dev,
	struct usb_host_endpoint *ep)
{
	if (dev->state != USB_STATE_NOTATTACHED)
		return usb_hcd_get_xfer_ring_dma_addr(dev, ep);

	return 0;
}
EXPORT_SYMBOL(usb_get_xfer_ring_dma_addr);

/*-------------------------------------------------------------------*/
/*
 * __usb_get_extra_descriptor() finds a descriptor of specific type in the
+306 −91
Original line number Diff line number Diff line
@@ -1776,25 +1776,151 @@ void xhci_free_command(struct xhci_hcd *xhci,
	kfree(command);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
/*
 * xhci_handle_sec_intr_events - quiesce and drain a secondary interrupter
 * @xhci: host controller
 * @intr_num: secondary interrupter index
 *
 * Disables the interrupter (IMAN.IE), acks any pending interrupt
 * (IMAN.IP, RW1C), walks the secondary event ring forward past every
 * pending event, then writes the final dequeue pointer back to ERDP with
 * EHB set so the ring is left empty.  Called from the cleanup path before
 * the ring's memory is freed.
 */
void xhci_handle_sec_intr_events(struct xhci_hcd *xhci, int intr_num)
{
	union xhci_trb *erdp_trb, *current_trb;
	struct xhci_segment	*seg;
	u64 erdp_reg;
	u32 iman_reg;
	dma_addr_t deq;
	unsigned long segment_offset;

	/* disable irq, ack pending interrupt and ack all pending events */

	iman_reg =
		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
	iman_reg &= ~IMAN_IE;
	writel_relaxed(iman_reg,
			&xhci->sec_ir_set[intr_num]->irq_pending);
	iman_reg =
		readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
	if (iman_reg & IMAN_IP)
		writel_relaxed(iman_reg,
			&xhci->sec_ir_set[intr_num]->irq_pending);

	/* last acked event trb is in erdp reg  */
	erdp_reg =
		xhci_read_64(xhci, &xhci->sec_ir_set[intr_num]->erst_dequeue);
	deq = (dma_addr_t)(erdp_reg & ~ERST_PTR_MASK);
	if (!deq) {
		pr_debug("%s: event ring handling not required\n", __func__);
		return;
	}

	seg = xhci->sec_event_ring[intr_num]->first_seg;
	segment_offset = deq - seg->dma;

	/* find out virtual address of the last acked event trb */
	erdp_trb = current_trb = &seg->trbs[0] +
				(segment_offset/sizeof(*current_trb));

	/* read cycle state of the last acked trb to find out CCS */
	/* NOTE(review): no le32_to_cpu() here, unlike the loop below —
	 * harmless on little-endian hosts; confirm for big-endian.
	 */
	xhci->sec_event_ring[intr_num]->cycle_state =
				(current_trb->event_cmd.flags & TRB_CYCLE);

	while (1) {
		/* last trb of the event ring: toggle cycle state */
		if (current_trb == &seg->trbs[TRBS_PER_SEGMENT - 1]) {
			xhci->sec_event_ring[intr_num]->cycle_state ^= 1;
			current_trb = &seg->trbs[0];
		} else {
			current_trb++;
		}

		/* cycle state transition marks the first unwritten TRB */
		if ((le32_to_cpu(current_trb->event_cmd.flags) & TRB_CYCLE) !=
		    xhci->sec_event_ring[intr_num]->cycle_state)
			break;
	}

	if (erdp_trb != current_trb) {
		deq =
		xhci_trb_virt_to_dma(xhci->sec_event_ring[intr_num]->deq_seg,
					current_trb);
		if (deq == 0)
			xhci_warn(xhci,
				"WARN invalid SW event ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		erdp_reg &= ERST_PTR_MASK;
		erdp_reg |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	erdp_reg |= ERST_EHB;
	xhci_write_64(xhci, erdp_reg,
			&xhci->sec_ir_set[intr_num]->erst_dequeue);
}

/*
 * xhci_sec_event_ring_cleanup - free one secondary event ring and its ERST
 * @hcd: the HCD owning the controller
 * @intr_num: secondary interrupter index to tear down
 *
 * Drains any pending events first (xhci_handle_sec_intr_events), then
 * frees the ERST DMA memory and the ring itself, NULLing both slots so a
 * second cleanup is a no-op.
 *
 * Return: 0 on success, -EINVAL for an out-of-range interrupter number.
 *
 * Note: the scraped diff interleaved removed lines from the old
 * xhci_mem_cleanup() here (a duplicate `dev` declaration via sysdev, a
 * stray cancel_delayed_work_sync() and unused i/j/num_ports); they are
 * dropped so the function compiles as the patch intended.
 */
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num)
{
	int size;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;

	if (intr_num >= xhci->max_interrupters) {
		xhci_err(xhci, "invalid secondary interrupter num %d\n",
			intr_num);
		return -EINVAL;
	}

	size =
	sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
	if (xhci->sec_erst[intr_num].entries) {
		/* quiesce the interrupter before freeing its memory */
		xhci_handle_sec_intr_events(xhci, intr_num);
		dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
				xhci->sec_erst[intr_num].erst_dma_addr);
		xhci->sec_erst[intr_num].entries = NULL;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
		intr_num);
	if (xhci->sec_event_ring[intr_num])
		xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);

	xhci->sec_event_ring[intr_num] = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"Freed sec event ring");

	return 0;
}

	/* Free the Event Ring Segment Table and the actual Event Ring */
/*
 * xhci_event_ring_cleanup - free all event rings: secondary first, then the
 * primary ring and its ERST
 * @xhci: host controller being torn down
 *
 * Counterpart of xhci_event_ring_init(); safe to call with partially
 * initialised state since every free is NULL-guarded and pointers are
 * cleared after freeing.  (Duplicate trace lines left over from the
 * scraped diff are dropped, and the "priamry" typo is fixed.)
 */
void xhci_event_ring_cleanup(struct xhci_hcd *xhci)
{
	int size;
	unsigned int i;
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;

	/* sec event ring clean up; interrupter 0 is the primary */
	for (i = 1; i < xhci->max_interrupters; i++)
		xhci_sec_event_ring_cleanup(xhci_to_hcd(xhci), i);

	kfree(xhci->sec_ir_set);
	xhci->sec_ir_set = NULL;
	kfree(xhci->sec_erst);
	xhci->sec_erst = NULL;
	kfree(xhci->sec_event_ring);
	xhci->sec_event_ring = NULL;

	/* primary event ring clean up */
	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
	if (xhci->erst.entries)
		dma_free_coherent(dev, size,
				xhci->erst.entries, xhci->erst.erst_dma_addr);
	xhci->erst.entries = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary ERST");
	if (xhci->event_ring)
		xhci_ring_free(xhci, xhci->event_ring);
	xhci->event_ring = NULL;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
	struct device	*dev = xhci_to_hcd(xhci)->self.controller;
	int i, j, num_ports;

	cancel_delayed_work_sync(&xhci->cmd_timer);

	xhci_event_ring_cleanup(xhci);

	if (xhci->lpm_command)
		xhci_free_command(xhci, xhci->lpm_command);
@@ -2035,30 +2161,6 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
	return 0;
}

/*
 * xhci_set_hc_event_deq - program the HC's primary event ring dequeue
 * pointer (ERDP) from the software dequeue position.
 *
 * Translates the ring's current dequeue TRB into a DMA address and writes
 * it into the ERDP register, keeping the register's low mask bits and
 * explicitly clearing EHB (event handler busy, RW1C) out of the preserved
 * value so pending events are not prematurely acknowledged.
 */
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	/* A zero DMA address means the virt-to-dma lookup failed */
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write event ring dequeue pointer, "
			"preserving EHB bit");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}

static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
		__le32 __iomem *addr, int max_caps)
{
@@ -2321,13 +2423,183 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
	return 0;
}

/*
 * xhci_event_ring_setup - allocate an event ring plus its Event Ring
 * Segment Table (ERST) and program the given interrupter register set.
 * @xhci: host controller
 * @er: out-param receiving the allocated ring
 * @ir_set: interrupter register set to program (primary or secondary)
 * @erst: ERST bookkeeping struct to fill in
 * @intr_num: interrupter index (0 = primary), used for logging
 * @flags: GFP allocation flags
 *
 * Shared by the primary and secondary setup paths; see xHCI spec event
 * ring setup (the old inline code in xhci_mem_init cited Section 4.9.3).
 * Register programming order (ERST size, base, then dequeue) must be
 * preserved.
 *
 * Return: 0 on success, -ENOMEM on allocation failure (the ring is freed
 * again if the ERST allocation fails, so no partial state leaks).
 */
int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
	struct xhci_intr_reg __iomem *ir_set, struct xhci_erst *erst,
	unsigned int intr_num, gfp_t flags)
{
	dma_addr_t dma, deq;
	u64 val_64;
	unsigned int val;
	struct xhci_segment *seg;
	struct device *dev = xhci_to_hcd(xhci)->self.controller;

	*er = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT, 0, flags);
	if (!*er)
		return -ENOMEM;

	erst->entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
	if (!erst->entries) {
		/* undo the ring allocation so the caller sees clean state */
		xhci_ring_free(xhci, *er);
		return -ENOMEM;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"intr# %d: Allocated event ring segment table at 0x%llx",
		intr_num, (unsigned long long)dma);

	memset(erst->entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	erst->num_entries = ERST_NUM_SEGS;
	erst->erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"intr# %d: num segs = %i, virt addr = %p, dma addr = 0x%llx",
			intr_num,
			erst->num_entries,
			erst->entries,
			(unsigned long long)erst->erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = (*er)->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &erst->entries[val];

		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = readl_relaxed(&ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"Write ERST size = %i to ir_set %d (some bits preserved)", val,
		intr_num);
	writel_relaxed(val, &ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"intr# %d: Set ERST entries to point to event ring.",
			intr_num);
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST base address for ir_set %d = 0x%llx",
			intr_num,
			(unsigned long long)erst->erst_dma_addr);
	val_64 = xhci_read_64(xhci, &ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (erst->erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &ir_set->erst_base);

	/* Set the event ring dequeue address */
	deq = xhci_trb_virt_to_dma((*er)->deq_seg, (*er)->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci,
		"intr# %d:WARN something wrong with SW event ring deq ptr.\n",
		intr_num);
	/* Update HC event ring dequeue pointer */
	val_64 = xhci_read_64(xhci, &ir_set->erst_dequeue);
	val_64 &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	val_64 &= ~ERST_EHB;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"intr# %d:Write event ring dequeue pointer, preserving EHB bit",
		intr_num);
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | val_64,
			&ir_set->erst_dequeue);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set %d.", intr_num);
	xhci_print_ir_set(xhci, intr_num);

	return 0;
}

/*
 * xhci_sec_event_ring_setup - lazily set up one secondary event ring
 * @hcd: the HCD owning the controller
 * @intr_num: secondary interrupter index
 *
 * Validates controller state and the secondary bookkeeping arrays, then
 * allocates and programs the ring via xhci_event_ring_setup().  Idempotent:
 * returns 0 immediately if the ring already exists.
 *
 * Return: 0 on success or if already set up, -EINVAL on bad state/index,
 * or the xhci_event_ring_setup() error.
 */
int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num)
{
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if ((xhci->xhc_state & XHCI_STATE_HALTED) || !xhci->sec_ir_set
		|| !xhci->sec_event_ring || !xhci->sec_erst ||
		intr_num >= xhci->max_interrupters) {
		xhci_err(xhci,
		"%s:state %x ir_set %p evt_ring %p erst %p intr# %d\n",
		__func__, xhci->xhc_state, xhci->sec_ir_set,
		xhci->sec_event_ring, xhci->sec_erst, intr_num);
		return -EINVAL;
	}

	/* Already set up? (sec_event_ring was verified non-NULL above, so
	 * the original's re-check of the array pointer was redundant.)
	 */
	if (xhci->sec_event_ring[intr_num]
		&& xhci->sec_event_ring[intr_num]->first_seg)
		return 0;

	xhci->sec_ir_set[intr_num] = &xhci->run_regs->ir_set[intr_num];
	ret = xhci_event_ring_setup(xhci,
				&xhci->sec_event_ring[intr_num],
				xhci->sec_ir_set[intr_num],
				&xhci->sec_erst[intr_num],
				intr_num, GFP_KERNEL);
	if (ret)
		xhci_err(xhci, "sec event ring setup failed inter#%d\n",
			intr_num);

	return ret;
}

/*
 * xhci_event_ring_init - set up the primary event ring and allocate the
 * bookkeeping arrays for the secondary interrupters.
 * @xhci: host controller
 * @flags: GFP allocation flags
 *
 * Secondary rings themselves are created lazily by
 * xhci_sec_event_ring_setup(); only the pointer/ERST arrays are allocated
 * here.  On failure partial allocations are left for the caller's
 * mem-cleanup path to release.
 *
 * Return: 0 on success, -ENOMEM or the primary setup error otherwise.
 */
int xhci_event_ring_init(struct xhci_hcd *xhci, gfp_t flags)
{
	int rc;

	/* primary + secondary */
	xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"// Allocating primary event ring");

	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];
	rc = xhci_event_ring_setup(xhci, &xhci->event_ring, xhci->ir_set,
		&xhci->erst, 0, flags);
	if (rc) {
		xhci_err(xhci, "failed to setup primary event ring\n");
		return rc;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
		"// Allocating sec event ring related pointers");

	xhci->sec_ir_set = kcalloc(xhci->max_interrupters,
				sizeof(*xhci->sec_ir_set), flags);
	if (!xhci->sec_ir_set)
		return -ENOMEM;

	xhci->sec_event_ring = kcalloc(xhci->max_interrupters,
				sizeof(*xhci->sec_event_ring), flags);
	if (!xhci->sec_event_ring)
		return -ENOMEM;

	xhci->sec_erst = kcalloc(xhci->max_interrupters,
				sizeof(*xhci->sec_erst), flags);
	if (!xhci->sec_erst)
		return -ENOMEM;

	return 0;
}

int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
	dma_addr_t	dma;
	struct device	*dev = xhci_to_hcd(xhci)->self.sysdev;
	unsigned int	val, val2;
	u64		val_64;
	struct xhci_segment	*seg;
	u32 page_size, temp;
	int i;

@@ -2453,73 +2725,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	xhci->dba = (void __iomem *) xhci->cap_regs + val;
	xhci_dbg_regs(xhci);
	xhci_print_run_regs(xhci);
	/* Set ir_set to interrupt register set 0 */
	xhci->ir_set = &xhci->run_regs->ir_set[0];

	/*
	 * Event ring setup: Allocate a normal ring, but also setup
	 * the event ring segment table (ERST).  Section 4.9.3.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
					0, flags);
	if (!xhci->event_ring)
		goto fail;
	if (xhci_check_trb_in_td_math(xhci) < 0)
	if (xhci_event_ring_init(xhci, GFP_KERNEL))
		goto fail;

	xhci->erst.entries = dma_alloc_coherent(dev,
			sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma,
			flags);
	if (!xhci->erst.entries)
	if (xhci_check_trb_in_td_math(xhci) < 0)
		goto fail;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Allocated event ring segment table at 0x%llx",
			(unsigned long long)dma);

	memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
	xhci->erst.num_entries = ERST_NUM_SEGS;
	xhci->erst.erst_dma_addr = dma;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
			xhci->erst.num_entries,
			xhci->erst.entries,
			(unsigned long long)xhci->erst.erst_dma_addr);

	/* set ring base address and size for each segment table entry */
	for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) {
		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
		entry->seg_addr = cpu_to_le64(seg->dma);
		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
		entry->rsvd = 0;
		seg = seg->next;
	}

	/* set ERST count with the number of entries in the segment table */
	val = readl(&xhci->ir_set->erst_size);
	val &= ERST_SIZE_MASK;
	val |= ERST_NUM_SEGS;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Write ERST size = %i to ir_set 0 (some bits preserved)",
			val);
	writel(val, &xhci->ir_set->erst_size);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST entries to point to event ring.");
	/* set the segment table base address */
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set ERST base address for ir_set 0 = 0x%llx",
			(unsigned long long)xhci->erst.erst_dma_addr);
	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	val_64 &= ERST_PTR_MASK;
	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);

	/* Set the event ring dequeue address */
	xhci_set_hc_event_deq(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wrote ERST address to ir_set 0.");
	xhci_print_ir_set(xhci, 0);

	/*
	 * XXX: Might need to set the Interrupter Moderation Register to
+60 −0
Original line number Diff line number Diff line
@@ -4906,6 +4906,61 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
}
EXPORT_SYMBOL_GPL(xhci_gen_setup);

dma_addr_t xhci_get_sec_event_ring_dma_addr(struct usb_hcd *hcd,
	unsigned int intr_num)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (intr_num >= xhci->max_interrupters) {
		xhci_err(xhci, "intr num %d >= max intrs %d\n", intr_num,
			xhci->max_interrupters);
		return 0;
	}

	if (!(xhci->xhc_state & XHCI_STATE_HALTED) &&
		xhci->sec_event_ring && xhci->sec_event_ring[intr_num]
		&& xhci->sec_event_ring[intr_num]->first_seg)
		return xhci->sec_event_ring[intr_num]->first_seg->dma;

	return 0;
}

/*
 * xhci_get_dcba_dma_addr - report the device context base address entry for
 * a device from the DCBAA.
 * @hcd: the HCD owning the controller
 * @udev: device whose slot entry is queried
 *
 * Return: the DCBAA entry for the device's slot, or 0 when the controller
 * is halted or the DCBAA was never allocated.
 */
dma_addr_t xhci_get_dcba_dma_addr(struct usb_hcd *hcd,
	struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return 0;
	if (!xhci->dcbaa)
		return 0;

	return xhci->dcbaa->dev_context_ptrs[udev->slot_id];
}

dma_addr_t xhci_get_xfer_ring_dma_addr(struct usb_hcd *hcd,
	struct usb_device *udev, struct usb_host_endpoint *ep)
{
	int ret;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;

	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		xhci_err(xhci, "%s: invalid args\n", __func__);
		return 0;
	}

	virt_dev = xhci->devs[udev->slot_id];
	ep_index = xhci_get_endpoint_index(&ep->desc);

	if (virt_dev->eps[ep_index].ring &&
		virt_dev->eps[ep_index].ring->first_seg)
		return virt_dev->eps[ep_index].ring->first_seg->dma;

	return 0;
}

static const struct hc_driver xhci_hc_driver = {
	.description =		"xhci-hcd",
	.product_desc =		"xHCI Host Controller",
@@ -4965,6 +5020,11 @@ static const struct hc_driver xhci_hc_driver = {
	.enable_usb3_lpm_timeout =	xhci_enable_usb3_lpm_timeout,
	.disable_usb3_lpm_timeout =	xhci_disable_usb3_lpm_timeout,
	.find_raw_port_number =	xhci_find_raw_port_number,
	.sec_event_ring_setup =		xhci_sec_event_ring_setup,
	.sec_event_ring_cleanup =	xhci_sec_event_ring_cleanup,
	.get_sec_event_ring_dma_addr =	xhci_get_sec_event_ring_dma_addr,
	.get_xfer_ring_dma_addr =	xhci_get_xfer_ring_dma_addr,
	.get_dcba_dma_addr =		xhci_get_dcba_dma_addr,
};

void xhci_init_driver(struct hc_driver *drv,
+9 −0
Original line number Diff line number Diff line
@@ -1708,6 +1708,8 @@ struct xhci_hcd {
	struct xhci_doorbell_array __iomem *dba;
	/* Our HCD's current interrupter register set */
	struct	xhci_intr_reg __iomem *ir_set;
	/* secondary interrupter */
	struct	xhci_intr_reg __iomem **sec_ir_set;

	int		core_id;

@@ -1750,6 +1752,11 @@ struct xhci_hcd {
	struct xhci_command	*current_cmd;
	struct xhci_ring	*event_ring;
	struct xhci_erst	erst;

	/* secondary event ring and erst */
	struct xhci_ring	**sec_event_ring;
	struct xhci_erst	*sec_erst;

	/* Scratchpad */
	struct xhci_scratchpad  *scratchpad;
	/* Store LPM test failed devices' information */
@@ -2002,6 +2009,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
void xhci_urb_free_priv(struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command);
int xhci_sec_event_ring_setup(struct usb_hcd *hcd, unsigned int intr_num);
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned int intr_num);

/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
Loading