
Commit bb98ecd4, authored by Sara Sharon and committed by Luca Coelho

iwlwifi: pcie: merge iwl_queue and iwl_txq



The original intent was to have the generic iwl_queue struct shared
between the RX and TX queues, but that never turned out to be the
case. Since it is shared with no struct other than iwl_txq, it only
adds unnecessary complexity. Merge the two structs.

Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 4fe10bc6
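
In essence, the change removes one level of indirection: every field that used to live in the embedded iwl_queue moves directly into iwl_txq, and every txq->q.X access becomes txq->X. A condensed before/after sketch of the shape of the change (field lists abbreviated; the _before/_after suffixes are illustrative names, not identifiers from the source):

#include <stdint.h>

typedef uint32_t u32;  /* stand-in for the kernel's u32 */

/* Before: iwl_txq embedded a generic queue struct, accessed as txq->q.X. */
struct iwl_queue {
	int write_ptr;
	int read_ptr;
	int n_window;
	u32 id;
	/* ... dma_addr, watermarks ... */
};

struct iwl_txq_before {
	struct iwl_queue q;
	void *tfds;
	/* ... */
};

/* After: the same fields live directly in iwl_txq, accessed as txq->X. */
struct iwl_txq_after {
	void *tfds;
	/* ... */
	int write_ptr;
	int read_ptr;
	int n_window;
	u32 id;
};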
+37 −43
@@ -195,39 +195,6 @@ struct iwl_cmd_meta {
	u32 tbs;
};

-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
- * the software buffers (in the variables @meta, @txb in struct
- * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
- * the same struct) have 256.
- * This means that we end up with the following:
- *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- *  SW entries:           | 0      | ... | 31          |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
- */
-struct iwl_queue {
-	int write_ptr;       /* 1-st empty entry (index) host_w*/
-	int read_ptr;         /* last used entry (index) host_r*/
-	/* use for monitoring and recovering the stuck queue */
-	dma_addr_t dma_addr;   /* physical addr for BD's */
-	int n_window;	       /* safe queue window */
-	u32 id;
-	int low_mark;	       /* low watermark, resume queue if free
-				* space more than this */
-	int high_mark;         /* high watermark, stop queue if free
-				* space less than this */
-};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
@@ -274,12 +241,31 @@ struct iwl_pcie_first_tb_buf {
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
+ * @write_ptr: 1-st empty entry (index) host_w
+ * @read_ptr: last used entry (index) host_r
+ * @dma_addr:  physical addr for BD's
+ * @n_window: safe queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ *  SW entries:           | 0      | ... | 31          |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
 */
struct iwl_txq {
-	struct iwl_queue q;
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
@@ -295,6 +281,14 @@ struct iwl_txq {
	bool block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
+
+	int write_ptr;
+	int read_ptr;
+	dma_addr_t dma_addr;
+	int n_window;
+	u32 id;
+	int low_mark;
+	int high_mark;
};

static inline dma_addr_t
@@ -633,9 +627,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
-		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
-		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
+	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}
}

@@ -644,22 +638,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
-		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
-		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
+	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
-				    txq->q.id);
+				    txq->id);
}

-static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
+static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
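
The two helpers at the end of this hunk implement the software-window scheme the comment above describes: get_cmd_index() folds a 0..255 hardware ring index into the n_window-sized software window (which is why n_window must be a power of two), and iwl_queue_used() tests occupancy with wraparound. A standalone sketch with concrete numbers (plain C; struct txq is a reduced stand-in for struct iwl_txq, and the helper bodies are copied from the hunk above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct txq {
	int write_ptr;  /* first empty entry */
	int read_ptr;   /* last used entry */
	int n_window;   /* SW window size; must be a power of two */
};

/* Same logic as get_cmd_index(): mask the HW index into the SW window. */
static uint8_t cmd_index(const struct txq *q, uint32_t index)
{
	return index & (q->n_window - 1);
}

/* Same logic as iwl_queue_used(): in-flight test, handling wraparound. */
static bool queue_used(const struct txq *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

int main(void)
{
	struct txq cmdq = { .write_ptr = 70, .read_ptr = 68, .n_window = 32 };

	/* HW index 70 sits in window N = 2 (entries 64..95); SW slot is 70 & 31 = 6. */
	printf("slot = %u\n", (unsigned)cmd_index(&cmdq, 70));

	/* Entries in [read_ptr, write_ptr) are in flight: 68 is, 70 is not. */
	printf("used(68) = %d, used(70) = %d\n",
	       queue_used(&cmdq, 68), queue_used(&cmdq, 70));
	return 0;
}

The masking also explains the two defines in the first hunk: TFD_TX_CMD_SLOTS (256) and TFD_CMD_SLOTS (32) are both powers of two, so index & (n_window - 1) is a cheap modulo.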
+1 −1
@@ -1142,7 +1142,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
-		cmd_index = get_cmd_index(&txq->q, index);
+		cmd_index = get_cmd_index(txq, index);

		if (rxq->id == 0)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
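
For context on this hunk: when a command response arrives on the RX path, the driver recovers the originating slot from the packet's sequence number. SEQ_TO_INDEX extracts the hardware ring index from the sequence field (in this driver it masks off the low byte; that detail is recalled from the surrounding source, so treat it as an assumption), and get_cmd_index() then folds it into the 32-entry command window. A hypothetical walkthrough:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sequence = 0x0146;            /* example pkt->hdr.sequence value */
	uint32_t index = sequence & 0xff;      /* SEQ_TO_INDEX -> HW index 0x46 = 70 */
	uint8_t cmd_index = index & (32 - 1);  /* get_cmd_index, n_window = 32 -> 6 */

	printf("hw index %u -> cmd slot %u\n", (unsigned)index, (unsigned)cmd_index);
	return 0;
}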
+13 −17
@@ -1899,7 +1899,7 @@ static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,

		txq->frozen = freeze;

-		if (txq->q.read_ptr == txq->q.write_ptr)
+		if (txq->read_ptr == txq->write_ptr)
			goto next_queue;

		if (freeze) {
@@ -1947,7 +1947,7 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
			txq->block--;
			if (!txq->block) {
				iwl_write32(trans, HBUS_TARG_WRPTR,
-					    txq->q.write_ptr | (i << 8));
+					    txq->write_ptr | (i << 8));
			}
		} else if (block) {
			txq->block++;
@@ -1967,14 +1967,14 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
	int cnt;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
-		txq->q.read_ptr, txq->q.write_ptr);
+		txq->read_ptr, txq->write_ptr);

	if (trans->cfg->use_tfh)
		/* TODO: access new SCD registers and dump them */
		return;

	scd_sram_addr = trans_pcie->scd_base_addr +
-			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+			SCD_TX_STTS_QUEUE_OFFSET(txq->id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));
@@ -2009,7 +2009,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
-	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;
@@ -2027,13 +2026,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
-		q = &txq->q;
-		wr_ptr = ACCESS_ONCE(q->write_ptr);
+		wr_ptr = ACCESS_ONCE(txq->write_ptr);

-		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+		while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
-			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+			u8 write_ptr = ACCESS_ONCE(txq->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
@@ -2042,7 +2040,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
			usleep_range(1000, 2000);
		}

-		if (q->read_ptr != q->write_ptr) {
+		if (txq->read_ptr != txq->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
@@ -2210,7 +2208,6 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
-	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
@@ -2228,10 +2225,9 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
-		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
-				cnt, q->read_ptr, q->write_ptr,
+				cnt, txq->read_ptr, txq->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
@@ -2659,7 +2655,7 @@ static struct iwl_trans_dump_data

	/* host commands */
	len += sizeof(*data) +
-		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+		cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
@@ -2727,9 +2723,9 @@ static struct iwl_trans_dump_data
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
-	ptr = cmdq->q.write_ptr;
-	for (i = 0; i < cmdq->q.n_window; i++) {
-		u8 idx = get_cmd_index(&cmdq->q, ptr);
+	ptr = cmdq->write_ptr;
+	for (i = 0; i < cmdq->n_window; i++) {
+		u8 idx = get_cmd_index(cmdq, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
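
This last hunk is the firmware-error dump walking every command-buffer slot: it starts at write_ptr and visits n_window entries, masking each ring position into a software slot. The hunk is cut off before the pointer-advance step, so the sketch below assumes the driver's dec-wrap helper (step backwards, wrapping at the 256-entry hardware ring); take the direction as an assumption:

#include <stdint.h>
#include <stdio.h>

#define HW_ENTRIES 256  /* TFD_QUEUE_SIZE_MAX */
#define N_WINDOW    32  /* TFD_CMD_SLOTS */

/* Visit each of the N_WINDOW command slots exactly once, newest first. */
static void walk_cmd_window(int write_ptr)
{
	int ptr = write_ptr;

	for (int i = 0; i < N_WINDOW; i++) {
		uint8_t idx = ptr & (N_WINDOW - 1);  /* get_cmd_index() logic */
		printf("SW slot %u (HW ptr %d)\n", (unsigned)idx, ptr);
		ptr = (ptr - 1) & (HW_ENTRIES - 1);  /* assumed dec-wrap at 256 */
	}
}

int main(void)
{
	walk_cmd_window(70);  /* visits slots 6, 5, 4, ... wrapping as needed */
	return 0;
}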
+95 −102
File changed; preview size limit exceeded, so its changes are collapsed.