Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd05f9aa authored by Shahar S Matityahu, committed by Luca Coelho
Browse files

iwlwifi: pcie: dynamic Tx command queue size



Devices in the A000 family can use a different size for the command queue.
To allow this, make the command queue size configurable and set the size
for A000 devices to 32.

Signed-off-by: Shahar S Matityahu <shahar.s.matityahu@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent b88beaf9
Loading
Loading
Loading
Loading
+2 −1
Original line number Original line Diff line number Diff line
@@ -134,7 +134,8 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
	.rf_id = true,							\
	.rf_id = true,							\
	.gen2 = true,							\
	.gen2 = true,							\
	.ext_nvm = true,						\
	.ext_nvm = true,						\
	.dbgc_supported = true
	.dbgc_supported = true,						\
	.tx_cmd_queue_size = 32


const struct iwl_cfg iwla000_2ac_cfg_hr = {
const struct iwl_cfg iwla000_2ac_cfg_hr = {
		.name = "Intel(R) Dual Band Wireless AC a000",
		.name = "Intel(R) Dual Band Wireless AC a000",
+3 −0
Original line number Original line Diff line number Diff line
@@ -321,6 +321,8 @@ struct iwl_pwr_tx_backoff {
 * @gen2: a000 and on transport operation
 * @gen2: a000 and on transport operation
 * @cdb: CDB support
 * @cdb: CDB support
 * @ext_nvm: extended NVM format
 * @ext_nvm: extended NVM format
 * @tx_cmd_queue_size: size of the cmd queue. If zero, use the same value as
 *	the regular queues
 *
 *
 * We enable the driver to be backward compatible wrt. hardware features.
 * We enable the driver to be backward compatible wrt. hardware features.
 * API differences in uCode shouldn't be handled here but through TLVs
 * API differences in uCode shouldn't be handled here but through TLVs
@@ -371,6 +373,7 @@ struct iwl_cfg {
	    cdb:1,
	    cdb:1,
	    ext_nvm:1,
	    ext_nvm:1,
	    dbgc_supported:1;
	    dbgc_supported:1;
	u16 tx_cmd_queue_size;
	u8 valid_tx_ant;
	u8 valid_tx_ant;
	u8 valid_rx_ant;
	u8 valid_rx_ant;
	u8 non_shared_ant;
	u8 non_shared_ant;
+1 −1
Original line number Original line Diff line number Diff line
@@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
	ctxt_info->hcmd_cfg.cmd_queue_addr =
	ctxt_info->hcmd_cfg.cmd_queue_addr =
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
	ctxt_info->hcmd_cfg.cmd_queue_size =
	ctxt_info->hcmd_cfg.cmd_queue_size =
		TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
		TFD_QUEUE_CB_SIZE(trans_pcie->tx_cmd_queue_size);


	/* allocate ucode sections in dram and set addresses */
	/* allocate ucode sections in dram and set addresses */
	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
	ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
+3 −0
Original line number Original line Diff line number Diff line
@@ -383,6 +383,7 @@ struct iwl_self_init_dram {
 * @hw_init_mask: initial unmasked hw causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @hw_mask: current unmasked hw causes
 * @tx_cmd_queue_size: the size of the tx command queue
 */
 */
struct iwl_trans_pcie {
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rxq *rxq;
@@ -463,6 +464,7 @@ struct iwl_trans_pcie {
	u32 fh_mask;
	u32 fh_mask;
	u32 hw_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
};
};


static inline struct iwl_trans_pcie *
static inline struct iwl_trans_pcie *
@@ -534,6 +536,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
void iwl_pcie_set_tx_cmd_queue_size(struct iwl_trans *trans);


static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
					  u8 idx)
					  u8 idx)
+6 −2
Original line number Original line Diff line number Diff line
@@ -1160,6 +1160,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
	struct iwl_txq *cmd_queue;
	struct iwl_txq *cmd_queue;
	int txq_id = trans_pcie->cmd_queue, ret;
	int txq_id = trans_pcie->cmd_queue, ret;


	iwl_pcie_set_tx_cmd_queue_size(trans);

	/* alloc and init the command queue */
	/* alloc and init the command queue */
	if (!trans_pcie->txq[txq_id]) {
	if (!trans_pcie->txq[txq_id]) {
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
@@ -1168,7 +1170,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
			return -ENOMEM;
			return -ENOMEM;
		}
		}
		trans_pcie->txq[txq_id] = cmd_queue;
		trans_pcie->txq[txq_id] = cmd_queue;
		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
		ret = iwl_pcie_txq_alloc(trans, cmd_queue,
					 trans_pcie->tx_cmd_queue_size, true);
		if (ret) {
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
			goto error;
@@ -1177,7 +1180,8 @@ int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
		cmd_queue = trans_pcie->txq[txq_id];
		cmd_queue = trans_pcie->txq[txq_id];
	}
	}


	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
	ret = iwl_pcie_txq_init(trans, cmd_queue,
				trans_pcie->tx_cmd_queue_size, true);
	if (ret) {
	if (ret) {
		IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
		IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
		goto error;
		goto error;
Loading