
Commit ef87979c authored by Stephen Hemminger, committed by David S. Miller

pktgen: better scheduler friendliness

The previous update did not resched in the inner loop, causing watchdog warnings.
Rewrite the inner loop to:
  * account for delays better with fewer clock calls
  * more accurate timing of delay:
    - only delay if the packet was successfully sent
    - if the delay is 100ns and it takes 10ns to build the packet,
      account for that
  * use wait_event_interruptible_timeout() rather than open-coding it.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b80d6a6
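
The last hunk of the diff below replaces an open-coded sleep in pktgen_thread_worker() with wait_event_interruptible_timeout(). Condensed from that hunk (context trimmed, not a complete function):

	/* before: check, then an open-coded interruptible sleep */
	prepare_to_wait(&(t->queue), &wait, TASK_INTERRUPTIBLE);
	schedule_timeout(HZ / 10);
	finish_wait(&(t->queue), &wait);

	/* after: one helper that sleeps and re-checks the condition */
	wait_event_interruptible_timeout(t->queue, t->control != 0, HZ/10);

The helper tests t->control != 0 both before sleeping and on every wakeup, so a control message that arrives between the check and the sleep cuts the wait short instead of costing a full HZ/10 tick.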
Showing 1 changed file with 72 additions and 79 deletions

--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2104,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 
 static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 {
-	ktime_t start;
+	ktime_t start_time, end_time;
 	s32 remaining;
 	struct hrtimer_sleeper t;
 
@@ -2115,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 	if (remaining <= 0)
 		return;
 
-	start = ktime_now();
+	start_time = ktime_now();
 	if (remaining < 100)
 		udelay(remaining); 	/* really small just spin */
 	else {
@@ -2134,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 		} while (t.task && pkt_dev->running && !signal_pending(current));
 		__set_current_state(TASK_RUNNING);
 	}
-	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
+	end_time = ktime_now();
+
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
+	pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
 }
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
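
This is the bookkeeping behind the "if the delay is 100ns and it takes 10ns to build the packet" bullet: next_tx is now anchored to end_time, the moment the wait finished, so time spent constructing the next packet is charged against the configured gap rather than added on top of it. A userspace sketch of the arithmetic (illustrative only, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t now = 1000;		/* ns, arbitrary clock */
		const uint64_t delay = 100;	/* configured inter-packet gap */
		uint64_t next_tx = now + delay;	/* set when the last wait ended */

		now += 10;			/* 10 ns spent building the packet */
		printf("remaining wait: %llu ns\n",	/* prints 90, not 100 */
		       (unsigned long long)(next_tx - now));
		return 0;
	}

Computing next_tx inside spin() also saves one ktime_now() call per packet: the old code updated next_tx separately in the transmit path (see the removed lines further down).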
@@ -3364,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
 	mutex_unlock(&pktgen_thread_lock);
 }
 
-static void idle(struct pktgen_dev *pkt_dev)
+static void pktgen_resched(struct pktgen_dev *pkt_dev)
 {
 	ktime_t idle_start = ktime_now();
+	schedule();
+	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
+
+static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
+{
+	ktime_t idle_start = ktime_now();
 
-	if (need_resched())
-		schedule();
-	else
-		cpu_relax();
-
+	while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+		if (signal_pending(current))
+			break;
+
+		if (need_resched())
+			pktgen_resched(pkt_dev);
+		else
+			cpu_relax();
+	}
 	pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
 }
 
-
 static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 {
 	struct net_device *odev = pkt_dev->odev;
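
pktgen_wait_for_skb() spins until skb->users falls back to 1, i.e. until the driver has dropped the reference taken for transmission, and yields through pktgen_resched() whenever the scheduler asks for the CPU; this inner-loop resched is what stops the watchdog complaints named in the commit message. A userspace analogue of the pattern (a sketch assuming C11 atomics; the kernel side uses atomic_read(), need_resched() and schedule()):

	#include <sched.h>
	#include <stdatomic.h>

	/* Spin until we hold the last reference, giving the CPU away
	 * instead of monopolizing it while another context finishes. */
	void wait_for_last_ref(atomic_int *users)
	{
		while (atomic_load(users) != 1)
			sched_yield();	/* kernel: pktgen_resched()/cpu_relax() */
	}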
@@ -3386,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	u16 queue_map;
 	int ret;
-	if (pkt_dev->delay) {
-		spin(pkt_dev, pkt_dev->next_tx);
+
+	/* If device is offline, then don't send */
+	if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
+		pktgen_stop_device(pkt_dev);
+		return;
+	}
 
 	/* This is max DELAY, this has special meaning of
 	 * "never transmit"
 	 */
-		if (pkt_dev->delay == ULLONG_MAX) {
+	if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
 		pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
 		return;
 	}
-	}
 
-	if (!pkt_dev->skb) {
-		set_cur_queue_map(pkt_dev);
-		queue_map = pkt_dev->cur_queue_map;
-	} else {
-		queue_map = skb_get_queue_mapping(pkt_dev->skb);
-	}
-
-	txq = netdev_get_tx_queue(odev, queue_map);
-	/* Did we saturate the queue already? */
-	if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
-		/* If device is down, then all queues are permnantly frozen */
-		if (netif_running(odev))
-			idle(pkt_dev);
-		else
-			pktgen_stop_device(pkt_dev);
-		return;
-	}
-
 	/* If no skb or clone count exhausted then get new one */
 	if (!pkt_dev->skb || (pkt_dev->last_ok &&
 			      ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
 		/* build a new pkt */
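
Both early exits above are wrapped in unlikely(), which expands to a __builtin_expect() hint so the compiler keeps the rare path out of the hot code layout. A minimal userspace equivalent (check_fd() is a hypothetical example, not from pktgen):

	#define unlikely(x)	__builtin_expect(!!(x), 0)

	/* The error path is annotated as rare; the hot path falls through. */
	int check_fd(int fd)
	{
		if (unlikely(fd < 0))
			return -1;
		return 0;
	}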
@@ -3434,18 +3432,20 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->clone_count = 0;	/* reset counter */
 	}
 
 	/* fill_packet() might have changed the queue */
+	if (pkt_dev->delay && pkt_dev->last_ok)
+		spin(pkt_dev, pkt_dev->next_tx);
+
 	queue_map = skb_get_queue_mapping(pkt_dev->skb);
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
-		pkt_dev->last_ok = 0;
-	else {
 	atomic_inc(&(pkt_dev->skb->users));
 
-	retry_now:
+	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+		ret = NETDEV_TX_BUSY;
+	else
 		ret = (*xmit)(pkt_dev->skb, odev);
 	switch (ret) {
 	case NETDEV_TX_OK:
 		txq_trans_update(txq);
@@ -3454,34 +3454,23 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 		pkt_dev->seq_num++;
 		pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
 		break;
-		case NETDEV_TX_LOCKED:
-			cpu_relax();
-			goto retry_now;
 	default: /* Drivers are not supposed to return other values! */
 		if (net_ratelimit())
 			pr_info("pktgen: %s xmit error: %d\n",
 				odev->name, ret);
 		pkt_dev->errors++;
 		/* fallthru */
+	case NETDEV_TX_LOCKED:
 	case NETDEV_TX_BUSY:
 		/* Retry it next time */
 		atomic_dec(&(pkt_dev->skb->users));
 		pkt_dev->last_ok = 0;
 	}
-
-		if (pkt_dev->delay)
-			pkt_dev->next_tx = ktime_add_ns(ktime_now(),
-							pkt_dev->delay);
-	}
 	__netif_tx_unlock_bh(txq);
 
 	/* If pkt_dev->count is zero, then run forever */
 	if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
-		while (atomic_read(&(pkt_dev->skb->users)) != 1) {
-			if (signal_pending(current))
-				break;
-			idle(pkt_dev);
-		}
+		pktgen_wait_for_skb(pkt_dev);
 
 		/* Done with this */
 		pktgen_stop_device(pkt_dev);
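
With the retry_now label gone, a busy or locked queue no longer spins under the tx lock: NETDEV_TX_LOCKED now shares the NETDEV_TX_BUSY path, which drops the extra skb reference taken before the attempt and clears last_ok, so the next pass retries the same skb and, because spin() is only entered when last_ok is set, does so without sleeping first. Condensed from the hunk above (error handling trimmed):

	switch (ret) {
	case NETDEV_TX_OK:
		pkt_dev->last_ok = 1;	/* success: the delay applies before the next send */
		break;
	case NETDEV_TX_LOCKED:
	case NETDEV_TX_BUSY:
		atomic_dec(&(pkt_dev->skb->users));	/* undo the pre-attempt hold */
		pkt_dev->last_ok = 0;	/* retry immediately on the next pass */
	}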
@@ -3514,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
 	while (!kthread_should_stop()) {
 		pkt_dev = next_to_run(t);
 
-		if (!pkt_dev &&
-		    (t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV))
-		    == 0) {
-			prepare_to_wait(&(t->queue), &wait,
-					TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ / 10);
-			finish_wait(&(t->queue), &wait);
+		if (unlikely(!pkt_dev && t->control == 0)) {
+			wait_event_interruptible_timeout(t->queue,
							 t->control != 0,
							 HZ/10);
+			continue;
 		}
 
 		__set_current_state(TASK_RUNNING);
 
-		if (pkt_dev)
+		if (likely(pkt_dev)) {
 			pktgen_xmit(pkt_dev);
+
+			if (need_resched())
+				pktgen_resched(pkt_dev);
+			else
+				cpu_relax();
+		}
 
 		if (t->control & T_STOP) {
 			pktgen_stop(t);
 			t->control &= ~(T_STOP);