
Commit 23e3be11 authored by Sarah Sharp, committed by Greg Kroah-Hartman

USB: xhci: Avoid global namespace pollution.



Make all globally visible functions start with xhci_ and mark functions as
static if they're only called within the same C file.  Fix some long lines
while we're at it.
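
The rename follows the usual kernel convention for a single flat symbol namespace: any function with external linkage carries the driver prefix, and anything used by only one compilation unit gets internal linkage instead. A minimal sketch of the convention, with hypothetical names (not code from this patch):

	/* foo.c: used only in this file, so internal linkage and no prefix */
	static int count_widgets(struct foo_dev *dev)
	{
		return dev->num_widgets;
	}

	/* Shared across the driver's .c files: declared in foo.h, foo_ prefix */
	int foo_reset_widgets(struct foo_dev *dev)
	{
		dev->num_widgets = 0;
		return 0;
	}

Besides preventing link-time collisions with identically named functions elsewhere in the kernel image, marking a helper static lets the compiler inline or discard it.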

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 06e7a148
drivers/usb/host/xhci-dbg.c  +7 −7
@@ -55,7 +55,7 @@ void xhci_dbg_regs(struct xhci_hcd *xhci)
	xhci_dbg(xhci, "// Doorbell array at %p:\n", xhci->dba);
}

-void xhci_print_cap_regs(struct xhci_hcd *xhci)
+static void xhci_print_cap_regs(struct xhci_hcd *xhci)
{
	u32 temp;

@@ -106,7 +106,7 @@ void xhci_print_cap_regs(struct xhci_hcd *xhci)
	xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
}

-void xhci_print_command_reg(struct xhci_hcd *xhci)
+static void xhci_print_command_reg(struct xhci_hcd *xhci)
{
	u32 temp;

@@ -124,7 +124,7 @@ void xhci_print_command_reg(struct xhci_hcd *xhci)
			(temp & CMD_LRESET) ? "not " : "");
}

-void xhci_print_status(struct xhci_hcd *xhci)
+static void xhci_print_status(struct xhci_hcd *xhci)
{
	u32 temp;

@@ -138,14 +138,14 @@ void xhci_print_status(struct xhci_hcd *xhci)
			(temp & STS_HALT) ? "halted" : "running");
}

-void xhci_print_op_regs(struct xhci_hcd *xhci)
+static void xhci_print_op_regs(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "xHCI operational registers at %p:\n", xhci->op_regs);
	xhci_print_command_reg(xhci);
	xhci_print_status(xhci);
}

-void xhci_print_ports(struct xhci_hcd *xhci)
+static void xhci_print_ports(struct xhci_hcd *xhci)
{
	u32 __iomem *addr;
	int i, j;
@@ -340,13 +340,13 @@ void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	xhci_dbg(xhci, "Ring deq = %p (virt), 0x%llx (dma)\n",
			ring->dequeue,
-			(unsigned long long)trb_virt_to_dma(ring->deq_seg,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->deq_seg,
							    ring->dequeue));
	xhci_dbg(xhci, "Ring deq updated %u times\n",
			ring->deq_updates);
	xhci_dbg(xhci, "Ring enq = %p (virt), 0x%llx (dma)\n",
			ring->enqueue,
-			(unsigned long long)trb_virt_to_dma(ring->enq_seg,
+			(unsigned long long)xhci_trb_virt_to_dma(ring->enq_seg,
							    ring->enqueue));
	xhci_dbg(xhci, "Ring enq updated %u times\n",
			ring->enq_updates);
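
All of the helpers made static above are called only from within xhci-dbg.c itself (xhci_print_op_regs(), visible in the hunk above, calls two of them), so nothing outside the file loses access. The failure mode this avoids is easy to reproduce; a hypothetical two-file example, not kernel code:

	/* a.c */
	void print_status(void) { /* ... */ }

	/* b.c */
	void print_status(void) { /* ... */ }

Linking a.o and b.o together fails with "multiple definition of `print_status'". Making each copy static, or prefixing the exported one as this patch does, removes the clash.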
drivers/usb/host/xhci-hcd.c  +23 −19
@@ -291,7 +291,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-void event_ring_work(unsigned long arg)
+void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
@@ -330,8 +330,8 @@ void event_ring_work(unsigned long arg)
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
-		if (setup_one_noop(xhci))
-			ring_cmd_db(xhci);
+		if (xhci_setup_one_noop(xhci))
+			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
@@ -374,7 +374,7 @@ int xhci_run(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
-	xhci->event_ring_timer.function = event_ring_work;
+	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
@@ -404,7 +404,7 @@ int xhci_run(struct usb_hcd *hcd)
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
-		doorbell = setup_one_noop(xhci);
+		doorbell = xhci_setup_one_noop(xhci);

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
@@ -600,9 +600,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc))
-		ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_ctrl_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
-		ret = queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
+		ret = xhci_queue_bulk_tx(xhci, mem_flags, urb,
+				slot_id, ep_index);
	else
		ret = -EINVAL;
exit:
@@ -668,8 +670,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
	 * the first cancellation to be handled.
	 */
	if (ep_ring->cancels_pending == 1) {
-		queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
-		ring_cmd_db(xhci);
+		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
+		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -913,13 +915,14 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma,
			LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info));

-	ret = queue_configure_endpoint(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
	if (ret < 0) {
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -ENOMEM;
	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
@@ -1033,12 +1036,12 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
-	if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
+	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
@@ -1058,13 +1061,13 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
-	ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
+	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
@@ -1086,8 +1089,8 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
-		if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
-			ring_cmd_db(xhci);
+		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
+			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
@@ -1129,13 +1132,14 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */

-	ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id);
+	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma,
+			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
-	ring_cmd_db(xhci);
+	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
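
The hunks in this file repeat one command-submission pattern: take xhci->lock, queue a command TRB, ring the command doorbell, release the lock, and wait for the completion to arrive on the event ring. Condensed from the xhci_alloc_dev() hunk above (error handling trimmed):

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		/* No room on the command ring; nothing was queued */
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	xhci_ring_cmd_db(xhci);		/* start the xHC fetching commands */
	spin_unlock_irqrestore(&xhci->lock, flags);
	/* ... completion is delivered later via the event ring ... */

Queuing alone only places the TRB in ring memory; the doorbell write is what tells the controller there is new work.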
drivers/usb/host/xhci-mem.c  +1 −1
@@ -746,7 +746,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
	xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]);

	/* Set the event ring dequeue address */
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);
	xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
	xhci_print_ir_set(xhci, xhci->ir_set, 0);
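
xhci_set_hc_event_deq() is the "publish" half of event-ring consumption: software advances its dequeue pointer, then writes the new position to the controller so the xHC knows those event TRBs can be reused. The pairing, as it appears in the xhci-ring.c handlers below, is roughly:

	/* consume one event TRB ... */
	inc_deq(xhci, xhci->event_ring, true);
	/* ... then tell the controller how far software has read */
	xhci_set_hc_event_deq(xhci);

Skipping the second step would eventually make the controller see the event ring as full and stop posting events.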

drivers/usb/host/xhci-ring.c  +37 −35
@@ -71,7 +71,7 @@
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
-dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	dma_addr_t offset;
@@ -235,12 +235,12 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
	return 1;
}

-void set_hc_event_deq(struct xhci_hcd *xhci)
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u32 temp;
	dma_addr_t deq;

-	deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
+	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
@@ -256,7 +256,7 @@ void set_hc_event_deq(struct xhci_hcd *xhci)
}

/* Ring the host controller doorbell after placing a command on the ring */
-void ring_cmd_db(struct xhci_hcd *xhci)
+void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

@@ -371,7 +371,7 @@ static void find_new_dequeue_state(struct xhci_hcd *xhci,
	ep_ring->deq_seg = state->new_deq_seg;
}

-void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
+static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
@@ -390,7 +390,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
@@ -403,7 +403,7 @@ void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
-					(unsigned long long)trb_virt_to_dma(cur_seg, cur_trb),
+					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
@@ -458,7 +458,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
-				(unsigned long long)trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
@@ -485,7 +485,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
				deq_state.new_deq_seg,
				(unsigned long long)deq_state.new_deq_seg->dma,
				deq_state.new_deq_ptr,
-				(unsigned long long)trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
+				(unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
				deq_state.new_cycle_state);
		queue_set_tr_deq(xhci, slot_id, ep_index,
				deq_state.new_deq_seg,
@@ -497,7 +497,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		 * ring running.
		 */
		ep_ring->state |= SET_DEQ_PENDING;
-		ring_cmd_db(xhci);
+		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise just ring the doorbell to restart the ring */
		ring_ep_doorbell(xhci, slot_id, ep_index);
@@ -612,7 +612,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
	dma_addr_t cmd_dequeue_dma;

	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
-	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
@@ -677,7 +677,7 @@ static void handle_port_status(struct xhci_hcd *xhci,

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
@@ -702,15 +702,15 @@ static struct xhci_segment *trb_in_td(
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

-	start_dma = trb_virt_to_dma(start_seg, start_trb);
+	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		/* We may get an event for a Link TRB in the middle of a TD */
-		end_seg_dma = trb_virt_to_dma(cur_seg,
+		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&start_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
-		end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
+		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
@@ -734,7 +734,7 @@ static struct xhci_segment *trb_in_td(
				return cur_seg;
		}
		cur_seg = cur_seg->next;
-		start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (1);

}
@@ -992,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
-	set_hc_event_deq(xhci);
+	xhci_set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (who have buffers bigger than 64MB) */
	if (urb) {
@@ -1050,7 +1050,7 @@ void xhci_handle_event(struct xhci_hcd *xhci)
	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
-		set_hc_event_deq(xhci);
+		xhci_set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
@@ -1119,7 +1119,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
	return 0;
}

-int xhci_prepare_transfer(struct xhci_hcd *xhci,
+static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int num_trbs,
@@ -1156,7 +1156,7 @@ int xhci_prepare_transfer(struct xhci_hcd *xhci,
	return 0;
}

-unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
+static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;
@@ -1200,7 +1200,7 @@ unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
	return num_trbs;
}

-void check_trb_math(struct urb *urb, int num_trbs, int running_total)
+static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
@@ -1216,7 +1216,7 @@ void check_trb_math(struct urb *urb, int num_trbs, int running_total)
				urb->transfer_buffer_length);
}

-void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
+static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
@@ -1229,7 +1229,7 @@ void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
	ring_ep_doorbell(xhci, slot_id, ep_index);
}

-int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
@@ -1248,7 +1248,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

-	trb_buff_len = xhci_prepare_transfer(xhci, xhci->devs[slot_id],
+	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
@@ -1356,7 +1356,7 @@ int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}

/* This is very similar to what ehci-q.c qtd_fill() does */
-int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
@@ -1400,7 +1400,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;
@@ -1469,7 +1469,7 @@ int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}

/* Caller must have locked xhci->lock */
-int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
@@ -1502,7 +1502,7 @@ int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
-	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
+	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
			urb, &td, mem_flags);
	if (ret < 0)
		return ret;
@@ -1584,36 +1584,38 @@ static int queue_cmd_noop(struct xhci_hcd *xhci)
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
-void *setup_one_noop(struct xhci_hcd *xhci)
+void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
-	return ring_cmd_db;
+	return xhci_ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
-int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an address device command TRB */
-int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a configure endpoint command TRB */
-int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}

-int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -1636,7 +1638,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

-	addr = trb_virt_to_dma(deq_seg, deq_ptr);
+	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0)
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
drivers/usb/host/xhci.h  +14 −10
@@ -1130,18 +1130,22 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);

/* xHCI ring, segment, TRB, and TD functions */
-dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
-void ring_cmd_db(struct xhci_hcd *xhci);
-void *setup_one_noop(struct xhci_hcd *xhci);
+dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
+void xhci_ring_cmd_db(struct xhci_hcd *xhci);
+void *xhci_setup_one_noop(struct xhci_hcd *xhci);
void xhci_handle_event(struct xhci_hcd *xhci);
-void set_hc_event_deq(struct xhci_hcd *xhci);
-int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
-int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
-int queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
+void xhci_set_hc_event_deq(struct xhci_hcd *xhci);
+int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
+int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index);
-int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
-int queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index);
-int queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id);
+int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
+		int slot_id, unsigned int ep_index);
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+		u32 slot_id);

/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,