Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20dc3811 authored by Neil Horman's avatar Neil Horman Committed by James Bottomley
Browse files

[SCSI] fcoe: reduce contention for fcoe_rx_list lock [v2]



There is potentially lots of contention for the rx_list_lock.  On a cpu that is
receiving lots of fcoe traffic, the softirq context has to add and release the
lock for every frame it receives, as does the receiving per-cpu thread.  We can
reduce this contention somewhat by altering the per-cpu threads loop such that
when traffic is detected on the fcoe_rx_list, we splice it to a temporary list.
In this way, we can process multiple skbs while only having to acquire and
release the fcoe_rx_list lock once.

[ Braces around single statement while loop removed by Robert Love
  to satisfy checkpatch.pl. ]

Signed-off-by: default avatarNeil Horman <nhorman@tuxdriver.com>
Acked-by: default avatarVasu Dev <vasu.dev@intel.com>
Signed-off-by: default avatarRobert Love <robert.w.love@intel.com>
Signed-off-by: default avatarJames Bottomley <JBottomley@Parallels.com>
parent dd060e74
Loading
Loading
Loading
Loading
+14 −8
Original line number Original line Diff line number Diff line
@@ -1471,7 +1471,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
	 * in softirq context.
	 * in softirq context.
	 */
	 */
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	__skb_queue_tail(&fps->fcoe_rx_list, skb);
	if (fps->fcoe_rx_list.qlen == 1)
	if (fps->thread->state == TASK_INTERRUPTIBLE)
		wake_up_process(fps->thread);
		wake_up_process(fps->thread);
	spin_unlock(&fps->fcoe_rx_list.lock);
	spin_unlock(&fps->fcoe_rx_list.lock);


@@ -1790,23 +1790,29 @@ static int fcoe_percpu_receive_thread(void *arg)
{
{
	struct fcoe_percpu_s *p = arg;
	struct fcoe_percpu_s *p = arg;
	struct sk_buff *skb;
	struct sk_buff *skb;
	struct sk_buff_head tmp;

	skb_queue_head_init(&tmp);


	set_user_nice(current, -20);
	set_user_nice(current, -20);


	while (!kthread_should_stop()) {
	while (!kthread_should_stop()) {


		spin_lock_bh(&p->fcoe_rx_list.lock);
		spin_lock_bh(&p->fcoe_rx_list.lock);
		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
		skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
		spin_unlock_bh(&p->fcoe_rx_list.lock);

		while ((skb = __skb_dequeue(&tmp)) != NULL)
			fcoe_recv_frame(skb);

		spin_lock_bh(&p->fcoe_rx_list.lock);
		if (!skb_queue_len(&p->fcoe_rx_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			schedule();
			schedule();
			set_current_state(TASK_RUNNING);
			set_current_state(TASK_RUNNING);
			if (kthread_should_stop())
		} else
				return 0;
			spin_lock_bh(&p->fcoe_rx_list.lock);
		}
			spin_unlock_bh(&p->fcoe_rx_list.lock);
			spin_unlock_bh(&p->fcoe_rx_list.lock);
		fcoe_recv_frame(skb);
	}
	}
	return 0;
	return 0;
}
}