Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a620865e authored by Ido Yariv, committed by Luciano Coelho
Browse files

wl12xx: Switch to a threaded interrupt handler



To achieve maximal throughput, it is very important to react to
interrupts as soon as possible. Currently the interrupt handler wakes up
a worker for handling interrupts in process context. A cleaner and more
efficient design would be to request a threaded interrupt handler.  This
handler's priority is very high, and can do blocking operations such as
SDIO/SPI transactions.

Some work can be deferred, mostly calls to mac80211 APIs
(ieee80211_rx_ni and ieee80211_tx_status). By deferring such work to a
different worker, we can keep the irq handler thread more I/O
responsive. In addition, on multi-core systems the two threads can be
scheduled on different cores, which will improve overall performance.

The use of WL1271_FLAG_IRQ_PENDING & WL1271_FLAG_IRQ_RUNNING was
changed. For simplicity, always query the FW for more pending
interrupts. Since there are relatively long bursts of interrupts, the
extra FW status read overhead is negligible. In addition, this enables
registering the IRQ handler with the ONESHOT option.

Signed-off-by: Ido Yariv <ido@wizery.com>
Reviewed-by: Luciano Coelho <coelho@ti.com>
Signed-off-by: Luciano Coelho <coelho@ti.com>
parent 393fb560
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -99,7 +99,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)

	mutex_lock(&wl->mutex);

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

+1 −0
Original line number Diff line number Diff line
@@ -168,5 +168,6 @@ void wl1271_unregister_hw(struct wl1271 *wl);
int wl1271_init_ieee80211(struct wl1271 *wl);
struct ieee80211_hw *wl1271_alloc_hw(void);
int wl1271_free_hw(struct wl1271 *wl);
irqreturn_t wl1271_irq(int irq, void *data);

#endif
+79 −48
Original line number Diff line number Diff line
@@ -374,7 +374,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -635,16 +635,39 @@ static void wl1271_fw_status(struct wl1271 *wl,
		(s64)le32_to_cpu(status->fw_localtime);
}

#define WL1271_IRQ_MAX_LOOPS 10
/*
 * Drain both deferred skb queues, handing every frame to mac80211.
 *
 * Per the commit message, rx/tx completion work is deferred out of the
 * threaded IRQ handler to keep it I/O-responsive; this helper performs
 * that deferred hand-off.  It is invoked from wl1271_netstack_work and
 * also directly from the IRQ path when the queues grow past
 * WL1271_DEFERRED_QUEUE_LIMIT.
 */
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
	struct sk_buff *skb;

	/* Pass all received frames to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
		ieee80211_rx_ni(wl->hw, skb);

	/* Return sent skbs to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
		ieee80211_tx_status(wl->hw, skb);
}

/*
 * Work item that flushes the deferred rx/tx queues to mac80211.
 *
 * Loops until the deferred rx queue is observed empty, since the IRQ
 * thread may enqueue more frames concurrently while we flush.
 * NOTE(review): only the rx queue length gates the loop — presumably
 * tx completions do not require the same re-check; confirm against the
 * enqueue sites in the IRQ path.
 */
static void wl1271_netstack_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, netstack_work);

	do {
		wl1271_flush_deferred_work(wl);
	} while (skb_queue_len(&wl->deferred_rx_queue));
}

#define WL1271_IRQ_MAX_LOOPS 256

static void wl1271_irq_work(struct work_struct *work)
irqreturn_t wl1271_irq(int irq, void *cookie)
{
	int ret;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	unsigned long flags;
	struct wl1271 *wl =
		container_of(work, struct wl1271, irq_work);
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;

	mutex_lock(&wl->mutex);

@@ -653,26 +676,27 @@ static void wl1271_irq_work(struct work_struct *work)
	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, true);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	spin_lock_irqsave(&wl->wl_lock, flags);
	while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
		clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		loopcount--;
	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		wl1271_fw_status(wl, wl->fw_status);
		intr = le32_to_cpu(wl->fw_status->common.intr);
		intr &= WL1271_INTR_MASK;
		if (!intr) {
			wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
			spin_lock_irqsave(&wl->wl_lock, flags);
			done = true;
			continue;
		}

		intr &= WL1271_INTR_MASK;

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
@@ -682,7 +706,7 @@ static void wl1271_irq_work(struct work_struct *work)
			goto out;
		}

		if (intr & WL1271_ACX_INTR_DATA) {
		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			wl1271_rx(wl, &wl->fw_status->common);
@@ -701,6 +725,12 @@ static void wl1271_irq_work(struct work_struct *work)
			if (wl->fw_status->common.tx_results_counter !=
			    (wl->tx_results_count & 0xff))
				wl1271_tx_complete(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
@@ -719,21 +749,16 @@ static void wl1271_irq_work(struct work_struct *work)

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");

		spin_lock_irqsave(&wl->wl_lock, flags);
	}

	if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->irq_work);
	else
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wl1271_irq);

static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@@ -974,7 +999,6 @@ int wl1271_plt_start(struct wl1271 *wl)
		goto out;

irq_disable:
		wl1271_disable_interrupts(wl);
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
@@ -983,7 +1007,9 @@ int wl1271_plt_start(struct wl1271 *wl)
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		cancel_work_sync(&wl->irq_work);
		wl1271_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
@@ -1010,14 +1036,15 @@ int __wl1271_plt_stop(struct wl1271 *wl)
		goto out;
	}

	wl1271_disable_interrupts(wl);
	wl1271_power_off(wl);

	wl->state = WL1271_STATE_OFF;
	wl->rx_counter = 0;

	mutex_unlock(&wl->mutex);
	cancel_work_sync(&wl->irq_work);
	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	mutex_lock(&wl->mutex);
out:
@@ -1169,7 +1196,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
		break;

irq_disable:
		wl1271_disable_interrupts(wl);
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
@@ -1178,7 +1204,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		cancel_work_sync(&wl->irq_work);
		wl1271_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
power_off:
		wl1271_power_off(wl);
@@ -1244,12 +1272,12 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)

	wl->state = WL1271_STATE_OFF;

	wl1271_disable_interrupts(wl);

	mutex_unlock(&wl->mutex);

	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->irq_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->pspoll_work);
	cancel_delayed_work_sync(&wl->elp_work);
@@ -1525,7 +1553,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)

	is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -1681,7 +1709,7 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -1910,7 +1938,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

@@ -2013,7 +2041,7 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2039,7 +2067,7 @@ static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2067,7 +2095,7 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2546,7 +2574,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2601,7 +2629,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
		conf_tid->apsd_conf[0] = 0;
		conf_tid->apsd_conf[1] = 0;
	} else {
		ret = wl1271_ps_elp_wakeup(wl, false);
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

@@ -2647,7 +2675,7 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2736,7 +2764,7 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
	if (ret < 0)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_free_sta;

@@ -2779,7 +2807,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
	if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -2812,7 +2840,7 @@ int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -3176,7 +3204,7 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
	if (wl->state == WL1271_STATE_OFF)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl, false);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

@@ -3376,9 +3404,12 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
		for (j = 0; j < AP_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
	INIT_WORK(&wl->irq_work, wl1271_irq_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
+3 −3
Original line number Diff line number Diff line
@@ -69,7 +69,7 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
	}
}

int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
@@ -87,7 +87,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (work_pending(&wl->irq_work) || chip_awake)
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
@@ -149,7 +149,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
	case STATION_ACTIVE_MODE:
	default:
		wl1271_debug(DEBUG_PSM, "leaving psm");
		ret = wl1271_ps_elp_wakeup(wl, false);
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			return ret;

+1 −1
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@
int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
		       u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
Loading