Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 127901ab authored by Tomas Winkler's avatar Tomas Winkler Committed by John W. Linville
Browse files

iwlwifi: refactor tx byte count table usage



This patch drops the unreadable usage of IWL_SET/GET_BITS16 in byte
count table handling.
It also cleans up the byte count table code a bit and adds
WARN_ON traps for invalid values.

This patch is pure cleanup, no functional changes.

Signed-off-by: default avatarTomas Winkler <tomas.winkler@intel.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: default avatarReinette Chatre <reinette.chatre@intel.com>
Signed-off-by: default avatarJohn W. Linville <linville@tuxdriver.com>
parent 951891c7
Loading
Loading
Loading
Loading
+6 −32
Original line number Diff line number Diff line
@@ -111,7 +111,6 @@
#define PCI_CFG_CMD_REG_INT_DIS_MSK	0x04
#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT         (0x80000000)

#define TFD_QUEUE_SIZE_MAX      (256)

#define IWL_NUM_SCAN_RATES         (2)

@@ -815,8 +814,6 @@ enum {
 * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
 */
#define IWL49_MAX_WIN_SIZE	64
#define IWL49_QUEUE_SIZE	256
#define IWL49_NUM_FIFOS 	7
#define IWL49_CMD_FIFO_NUM	4
#define IWL49_NUM_QUEUES	16
@@ -882,26 +879,7 @@ struct iwl_tfd {


/**
 * struct iwl4965_queue_byte_cnt_entry
 *
 * Byte Count Table Entry
 *
 * Bit fields:
 * 15-12: reserved
 * 11- 0: total to-be-transmitted byte count of frame (does not include command)
 */
/* Pre-patch entry type: a raw __le16 whose 12-bit byte count was
 * accessed through the POS/LEN/SYM macro triplet consumed by the
 * IWL_SET/GET_BITS16 macros this patch removes. */
struct iwl4965_queue_byte_cnt_entry {
	__le16 val;	/* little-endian word holding the bitfields below */
	/* __le16 byte_cnt:12; */
#define IWL_byte_cnt_POS 0	/* byte_cnt starts at bit 0 of 'val' */
#define IWL_byte_cnt_LEN 12	/* byte_cnt is 12 bits wide */
#define IWL_byte_cnt_SYM val	/* byte_cnt lives in member 'val' */
	/* __le16 rsvd:4; */	/* bits 12-15 reserved */
} __attribute__ ((packed));


/**
 * struct iwl4965_sched_queue_byte_cnt_tbl
 * struct iwl4965_schedq_bc_tbl
 *
 * Byte Count table
 *
@@ -915,15 +893,12 @@ struct iwl4965_queue_byte_cnt_entry {
 * count table for the chosen Tx queue.  If the TFD index is 0-63, the driver
 * must duplicate the byte count entry in corresponding index 256-319.
 *
 * "dont_care" padding puts each byte count table on a 1024-byte boundary;
 * padding puts each byte count table on a 1024-byte boundary;
 * 4965 assumes tables are separated by 1024 bytes.
 */
struct iwl4965_sched_queue_byte_cnt_tbl {
	struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL49_QUEUE_SIZE +
						       IWL49_MAX_WIN_SIZE];
	u8 dont_care[1024 -
		     (IWL49_QUEUE_SIZE + IWL49_MAX_WIN_SIZE) *
		     sizeof(__le16)];
/* Post-patch 4965 byte count table: one plain __le16 per TFD slot
 * (TFD_QUEUE_BC_SIZE = 256 entries + 64 duplicates), padded so each
 * per-queue table lands on a 1024-byte boundary — the surrounding
 * comment notes 4965 assumes tables are separated by 1024 bytes. */
struct iwl4965_schedq_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];	/* byte count per TFD index */
	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];	/* pad to 1 KiB */
} __attribute__ ((packed));


@@ -951,8 +926,7 @@ struct iwl4965_sched_queue_byte_cnt_tbl {
 * 31- 0:  Not used
 */
struct iwl4965_shared {
	struct iwl4965_sched_queue_byte_cnt_tbl
	 queues_byte_cnt_tbls[IWL49_NUM_QUEUES];
	struct iwl4965_schedq_bc_tbl queues_bc_tbls[IWL49_NUM_QUEUES];
	__le32 rb_closed;

	/* __le32 rb_closed_stts_rb_num:12; */
+11 −10
Original line number Diff line number Diff line
@@ -716,7 +716,7 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
	/* Tell 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		(priv->shared_phys +
		 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
		 offsetof(struct iwl4965_shared, queues_bc_tbls)) >> 10);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
@@ -1668,21 +1668,22 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl_tx_queue *txq,
					    u16 byte_cnt)
{
	/* NOTE(review): this span is a unified-diff rendering with the
	 * +/- markers stripped, so pre- and post-patch lines of this
	 * function appear interleaved; the annotated pairs below are
	 * old vs. new forms of the same statement, not duplicate code. */
	int len;				/* old: late-initialized */
	int txq_id = txq->q.id;			/* old declaration order */
	struct iwl4965_shared *shared_data = priv->shared_virt;
	int txq_id = txq->q.id;			/* new declaration order */
	int write_ptr = txq->q.write_ptr;	/* new: cached write pointer */
	int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;	/* new */
	__le16 bc_ent;				/* new: prepacked table entry */

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;	/* old */
	/* new: trap byte counts overflowing the 12-bit field and
	 * out-of-range write pointers */
	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	bc_ent = cpu_to_le16(len & 0xFFF);	/* new: plain 12-bit pack */
	/* Set up byte count within first 256 entries */
	/* old form: macro-based bitfield store */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);
	/* new form: direct __le16 store */
	shared_data->queues_bc_tbls[txq_id].tfd_offset[write_ptr] = bc_ent;

	/* If within first 64 entries, duplicate at end */
	/* old form of the duplicate-entry update */
	if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
	/* new form of the duplicate-entry update */
	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		shared_data->queues_bc_tbls[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

/**
+15 −14
Original line number Diff line number Diff line
@@ -76,30 +76,31 @@
/* EEPROM */
#define IWL_5000_EEPROM_IMG_SIZE			2048


#define IWL50_MAX_WIN_SIZE                64
#define IWL50_QUEUE_SIZE                 256
#define IWL50_CMD_FIFO_NUM                 7
#define IWL50_NUM_QUEUES                  20
#define IWL50_NUM_AMPDU_QUEUES		  10
#define IWL50_FIRST_AMPDU_QUEUE		  10

#define IWL_sta_id_POS 12
#define IWL_sta_id_LEN 4
#define IWL_sta_id_SYM val

/* Fixed (non-configurable) rx data from phy */

/* Base physical address of iwl5000_shared is provided to SCD_DRAM_BASE_ADDR
 * and &iwl5000_shared.val0 is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG */
struct iwl5000_sched_queue_byte_cnt_tbl {
	struct iwl4965_queue_byte_cnt_entry tfd_offset[IWL50_QUEUE_SIZE +
						       IWL50_MAX_WIN_SIZE];
/**
 * struct iwl5000_schedq_bc_tbl scheduler byte count table
 * 	base physical address of iwl5000_shared
 * 	is provided to SCD_DRAM_BASE_ADDR
 * @tfd_offset  bits  0-11 - tx command byte count
 *	        bits 12-15 - station index
 */
/* Post-patch 5000 byte count table.  Each entry packs the 12-bit tx
 * byte count in bits 0-11 and the station index in bits 12-15 (see
 * the cpu_to_le16 packing in iwl5000_txq_update_byte_cnt_tbl).
 * Unlike the 4965 variant there is no explicit pad member here. */
struct iwl5000_schedq_bc_tbl {
	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];	/* 256 + 64 duplicate slots */
} __attribute__ ((packed));

/**
 * struct iwl5000_shared
 * @rb_closed
 * 	address is provided to FH_RSCSR_CHNL0_STTS_WPTR_REG
 */
struct iwl5000_shared {
	struct iwl5000_sched_queue_byte_cnt_tbl
	 queues_byte_cnt_tbls[IWL50_NUM_QUEUES];
	struct iwl5000_schedq_bc_tbl queues_bc_tbls[IWL50_NUM_QUEUES];
	__le32 rb_closed;

	/* __le32 rb_closed_stts_rb_num:12; */
+25 −27
Original line number Diff line number Diff line
@@ -723,7 +723,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)

	iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
		(priv->shared_phys +
		 offsetof(struct iwl5000_shared, queues_byte_cnt_tbls)) >> 10);
		 offsetof(struct iwl5000_shared, queues_bc_tbls)) >> 10);
	iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
		IWL50_SCD_QUEUECHAIN_SEL_ALL(
			priv->hw_params.max_txq_num));
@@ -891,15 +891,17 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    u16 byte_cnt)
{
	struct iwl5000_shared *shared_data = priv->shared_virt;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta = 0;
	int len;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != IWL_CMD_QUEUE_NUM) {
		sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

		switch (sec_ctl & TX_CMD_SEC_MSK) {
@@ -915,40 +917,36 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
		}
	}

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);
	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], sta_id, sta);
	shared_data->queues_bc_tbls[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			byte_cnt, len);
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.write_ptr],
			sta_id, sta);
	}
	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		shared_data->queues_bc_tbls[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
					   struct iwl_tx_queue *txq)
{
	/* NOTE(review): diff rendering with +/- markers stripped; old
	 * (sta / .val stores / IWL50_* constants) and new (sta_id /
	 * bc_ent / TFD_QUEUE_* constants) lines are interleaved. */
	int txq_id = txq->q.id;			/* old declaration order */
	struct iwl5000_shared *shared_data = priv->shared_virt;
	u8 sta = 0;				/* old name */
	int txq_id = txq->q.id;			/* new declaration order */
	int read_ptr = txq->q.read_ptr;		/* new: cached read pointer */
	u8 sta_id = 0;				/* new name */
	__le16 bc_ent;				/* new: prepacked entry */

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);	/* new range trap */

	if (txq_id != IWL_CMD_QUEUE_NUM)
		sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;	/* old */
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;	/* new */

	/* old form: store through the bitfield-struct member */
	shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
					val = cpu_to_le16(1 | (sta << 12));
	/* new form: byte count 1 with station id in bits 12-15 */
	bc_ent =  cpu_to_le16(1 | (sta_id << 12));
	shared_data->queues_bc_tbls[txq_id].
			tfd_offset[read_ptr] = bc_ent;

	/* old form of the duplicate-entry update */
	if (txq->q.write_ptr < IWL50_MAX_WIN_SIZE) {
		shared_data->queues_byte_cnt_tbls[txq_id].
			tfd_offset[IWL50_QUEUE_SIZE + txq->q.read_ptr].
				val = cpu_to_le16(1 | (sta << 12));
	}
	/* new form; NOTE(review): the guard tests write_ptr while the
	 * index uses read_ptr — looks inconsistent, confirm whether
	 * read_ptr was intended here */
	if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		shared_data->queues_bc_tbls[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =  bc_ent;
}

static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+5 −0
Original line number Diff line number Diff line
@@ -393,4 +393,9 @@
/* TCSR: tx_config register values */
#define FH_RSCSR_FRAME_SIZE_MSK	(0x00003FFF)	/* bits 0-13 */

#define TFD_QUEUE_SIZE_MAX      (256)
#define TFD_QUEUE_SIZE_BC_DUP	(64)
#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)


#endif /* !__iwl_fh_h__ */
Loading