
Commit 1e3452e3 authored by Hans de Goede, committed by Greg Kroah-Hartman

xhci: Move allocating of command for new_dequeue_state to queue_set_tr_deq()



There are multiple reasons for this:

1) It fixes a missing check for xhci_alloc_command() failing in
   xhci_handle_cmd_stop_ep()
2) It adds a warning when the new dequeue state cannot be set because
   xhci_alloc_command() failed
3) It moves the command allocation after the sanity checks in
   queue_set_tr_deq(), so the command is not leaked when those checks fail
4) Since queue_set_tr_deq() now owns the command, it can free it if
   queue_command() fails
5) It reduces code duplication

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fac1f485
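Editor's note: the resource-ownership pattern this commit applies — run the sanity checks first, allocate only after they pass, and free the allocation yourself if handing it off fails — distilled into a minimal, self-contained C sketch. All names below (submit_cmd, validate, enqueue, struct cmd) are hypothetical stand-ins for illustration, not the xhci API:

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int payload;
};

/* Hypothetical sanity check, standing in for queue_set_tr_deq()'s
 * "bad DMA address" / "command already pending" checks. */
static int validate(int payload)
{
	return payload > 0;
}

/* Hypothetical enqueue, standing in for queue_command(); fails when
 * the imaginary ring is full. */
static int enqueue(struct cmd *c, int ring_full)
{
	(void)c;
	return ring_full ? -1 : 0;
}

static int submit_cmd(int payload, int ring_full)
{
	struct cmd *c;
	int ret;

	/* Check before allocating: if this fails, there is nothing to leak. */
	if (!validate(payload))
		return -1;

	c = malloc(sizeof(*c));
	if (!c)
		return -1;	/* allocation failure is checked, not ignored */
	c->payload = payload;

	ret = enqueue(c, ring_full);
	if (ret < 0) {
		free(c);	/* we allocated the command, so we free it */
		return ret;
	}
	return 0;		/* on success, ownership passes to the queue */
}

int main(void)
{
	printf("ok:        %d\n", submit_cmd(1, 0));	/* 0: queued */
	printf("ring full: %d\n", submit_cmd(1, 1));	/* -1: freed, no leak */
	printf("invalid:   %d\n", submit_cmd(0, 0));	/* -1: never allocated */
	return 0;
}

Compare with the patched queue_set_tr_deq() in the diff below: the allocation sits after the DMA-address and pending-command checks, and a queue_command() failure is the one path where the function still owns the command and must free it.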
drivers/usb/host/xhci-ring.c +22 −13
@@ -572,14 +572,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 	}
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci,
-		struct xhci_command *cmd, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id,
 		struct xhci_segment *deq_seg,
 		union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state)
@@ -594,7 +592,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
 			deq_state->new_deq_ptr,
 			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
 			deq_state->new_cycle_state);
-	queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
+	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
 			deq_state->new_deq_seg,
 			deq_state->new_deq_ptr,
 			(u32) deq_state->new_cycle_state);
@@ -743,12 +741,8 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 
 	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
 	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-		struct xhci_command *command;
-		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-		xhci_queue_new_dequeue_state(xhci, command,
-				slot_id, ep_index,
-				ep->stopped_td->urb->stream_id,
-				&deq_state);
+		xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
+				ep->stopped_td->urb->stream_id, &deq_state);
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Otherwise ring the doorbell(s) to restart queued transfers */
@@ -3929,8 +3923,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
-			int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
 			unsigned int ep_index, unsigned int stream_id,
 			struct xhci_segment *deq_seg,
 			union xhci_trb *deq_ptr, u32 cycle_state)
@@ -3942,6 +3935,8 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 	u32 trb_sct = 0;
 	u32 type = TRB_TYPE(TRB_SET_DEQ);
 	struct xhci_virt_ep *ep;
+	struct xhci_command *cmd;
+	int ret;
 
 	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
 	if (addr == 0) {
@@ -3956,14 +3951,28 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
 		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
 		return 0;
 	}
+
+	/* This function gets called from contexts where it cannot sleep */
+	cmd = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+	if (!cmd) {
+		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr: ENOMEM\n");
+		return 0;
+	}
+
 	ep->queued_deq_seg = deq_seg;
 	ep->queued_deq_ptr = deq_ptr;
 	if (stream_id)
 		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-	return queue_command(xhci, cmd,
+	ret = queue_command(xhci, cmd,
 			lower_32_bits(addr) | trb_sct | cycle_state,
 			upper_32_bits(addr), trb_stream_id,
 			trb_slot_id | trb_ep_index | type, false);
+	if (ret < 0) {
+		xhci_free_command(xhci, cmd);
+		return ret;
+	}
+
+	return 0;
 }
 
 int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
drivers/usb/host/xhci.c +1 −6
@@ -2887,14 +2887,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
 	 * issue a configure endpoint command later.
 	 */
 	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
-		struct xhci_command *command;
-		/* Can't sleep if we're called from cleanup_halted_endpoint() */
-		command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
-		if (!command)
-			return;
 		xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
 				"Queueing new dequeue state");
-		xhci_queue_new_dequeue_state(xhci, command, udev->slot_id,
+		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
 				ep_index, ep->stopped_stream, &deq_state);
 	} else {
 		/* Better hope no one uses the input context between now and the
drivers/usb/host/xhci.h +0 −1
@@ -1839,7 +1839,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
 		unsigned int stream_id, struct xhci_td *cur_td,
 		struct xhci_dequeue_state *state);
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
-		struct xhci_command *cmd,
 		unsigned int slot_id, unsigned int ep_index,
 		unsigned int stream_id,
 		struct xhci_dequeue_state *deq_state);