
Commit b2cf410c authored by Johannes Berg, committed by John W. Linville

iwlwifi: move rx_page_order into transport

That way the page order isn't needed in hw_params,
which is shared data. It also isn't really what we
should configure for the transport; a plain 4k/8k
choice is better, so configure a bool and derive
the page order inside the transport. This also
means the transport no longer needs access to the
module parameter.
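
As a rough sketch (not part of the diffs shown below), the transport's
configure step might derive the page order from the new bool like this;
the function name iwl_trans_pcie_configure, the iwl_trans_config type
name and the literal 4k/8k sizes are assumptions here:

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Remember the 4k/8k choice and derive the page order once,
	 * so the RX path no longer consults hw_params or the module
	 * parameter. (Sketch; names as assumed above.) */
	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);
}

With the order cached in iwl_trans_pcie, the RX and command-completion
paths below can drop their hw_params(trans) lookups.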

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent e3e07e0b
+1 −11
@@ -1401,23 +1401,12 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
 #endif
 }
 
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
 static void iwl_set_hw_params(struct iwl_priv *priv)
 {
 	if (cfg(priv)->ht_params)
 		hw_params(priv).use_rts_for_aggregation =
 			cfg(priv)->ht_params->use_rts_for_aggregation;
 
-	if (iwlagn_mod_params.amsdu_size_8K)
-		hw_params(priv).rx_page_order =
-			get_order(IWL_RX_BUF_SIZE_8K);
-	else
-		hw_params(priv).rx_page_order =
-			get_order(IWL_RX_BUF_SIZE_4K);
-
 	if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
 		hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
 
@@ -1508,6 +1497,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
 	trans_cfg.op_mode = op_mode;
 	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+	trans_cfg.rx_buf_size_8k = iwlagn_mod_params.amsdu_size_8K;
 
 	ucode_flags = fw->ucode_capa.flags;
 
+0 −2
@@ -166,7 +166,6 @@ struct iwl_mod_params {
  * @valid_rx_ant: usable antennas for RX
  * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
  * @sku: sku read from EEPROM
- * @rx_page_order: Rx buffer page order
  * @ct_kill_threshold: temperature threshold - in hw dependent unit
  * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
  *	relevant for 1000, 6000 and up
@@ -182,7 +181,6 @@ struct iwl_hw_params {
 	u8  ht40_channel;
 	bool use_rts_for_aggregation;
 	u16 sku;
-	u32 rx_page_order;
 	u32 ct_kill_threshold;
 	u32 ct_kill_exit_threshold;
 	unsigned int wd_timeout;
+5 −0
@@ -227,6 +227,8 @@ struct iwl_tx_queue {
  * @ucode_write_waitq: wait queue for uCode load
  * @status - transport specific status flags
  * @cmd_queue - command queue number
+ * @rx_buf_size_8k: 8 kB RX buffer size
+ * @rx_page_order: page order for receive buffer size
  */
 struct iwl_trans_pcie {
 	struct iwl_rx_queue rxq;
@@ -266,6 +268,9 @@ struct iwl_trans_pcie {
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 	u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
 	u8 n_q_to_fifo;
+
+	bool rx_buf_size_8k;
+	u32 rx_page_order;
 };
 
 #define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
+8 −8
@@ -274,17 +274,17 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			gfp_mask |= __GFP_NOWARN;
 
-		if (hw_params(trans).rx_page_order > 0)
+		if (trans_pcie->rx_page_order > 0)
 			gfp_mask |= __GFP_COMP;
 
 		/* Alloc a new receive buffer */
 		page = alloc_pages(gfp_mask,
-				  hw_params(trans).rx_page_order);
+				  trans_pcie->rx_page_order);
 		if (!page) {
 			if (net_ratelimit())
 				IWL_DEBUG_INFO(trans, "alloc_pages failed, "
 					   "order: %d\n",
-					   hw_params(trans).rx_page_order);
+					   trans_pcie->rx_page_order);
 
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
@@ -303,7 +303,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			__free_pages(page, hw_params(trans).rx_page_order);
+			__free_pages(page, trans_pcie->rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -316,7 +316,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 		rxb->page = page;
 		/* Get physical address of the RB */
 		rxb->page_dma = dma_map_page(trans->dev, page, 0,
-				PAGE_SIZE << hw_params(trans).rx_page_order,
+				PAGE_SIZE << trans_pcie->rx_page_order,
 				DMA_FROM_DEVICE);
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -367,7 +367,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
 	unsigned long flags;
 	bool page_stolen = false;
-	int max_len = PAGE_SIZE << hw_params(trans).rx_page_order;
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
 	u32 offset = 0;
 
 	if (WARN_ON(!rxb))
@@ -452,7 +452,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 
 	/* page was stolen from us -- free our reference */
 	if (page_stolen) {
-		__free_pages(rxb->page, hw_params(trans).rx_page_order);
+		__free_pages(rxb->page, trans_pcie->rx_page_order);
 		rxb->page = NULL;
 	}
 
@@ -463,7 +463,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
-				PAGE_SIZE << hw_params(trans).rx_page_order,
+				PAGE_SIZE << trans_pcie->rx_page_order,
 				DMA_FROM_DEVICE);
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
+1 −1
@@ -765,7 +765,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
 
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
-		meta->source->_rx_page_order = hw_params(trans).rx_page_order;
+		meta->source->_rx_page_order = trans_pcie->rx_page_order;
 		meta->source->handler_status = handler_status;
 	}
 