Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8ad71bef authored by Emmanuel Grumbach, committed by John W. Linville
Browse files

iwlagn: move tx queues to transport layer



This finalizes the move of the data path to the transport layer.

Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent e20d4341
Loading
Loading
Loading
Loading
+2 −21
Original line number Original line Diff line number Diff line
@@ -742,7 +742,6 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index = SEQ_TO_INDEX(sequence);
	int cmd_index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u32 status = le16_to_cpu(tx_resp->status.status);
@@ -755,17 +754,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	struct sk_buff_head skbs;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	struct iwl_rxon_context *ctx;

	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
	if ((cmd_index >= txq->q.n_bd) ||
	    (iwl_queue_used(&txq->q, cmd_index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
			  "cmd_index %d is out of range [0-%d] %d %d\n",
			  __func__, txq_id, cmd_index, txq->q.n_bd,
			  txq->q.write_ptr, txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;


	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
		IWLAGN_TX_RES_TID_POS;
@@ -774,12 +763,10 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)


	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	spin_lock_irqsave(&priv->shrd->sta_lock, flags);


	if (txq->sched_retry)
	if (is_agg)
		iwl_rx_reply_tx_agg(priv, tx_resp);
		iwl_rx_reply_tx_agg(priv, tx_resp);


	if (tx_resp->frame_count == 1) {
	if (tx_resp->frame_count == 1) {
		bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

		__skb_queue_head_init(&skbs);
		__skb_queue_head_init(&skbs);
		/*we can free until ssn % q.n_bd not inclusive */
		/*we can free until ssn % q.n_bd not inclusive */
		iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
		iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
@@ -850,14 +837,12 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
{
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned long flags;
	int index;
	int sta_id;
	int sta_id;
	int tid;
	int tid;
	int freed;
	int freed;
@@ -875,14 +860,10 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
		return;
		return;
	}
	}


	txq = &priv->txq[scd_flow];
	sta_id = ba_resp->sta_id;
	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	tid = ba_resp->tid;
	agg = &priv->shrd->tid_data[sta_id][tid].agg;
	agg = &priv->shrd->tid_data[sta_id][tid].agg;


	/* Find index of block-ack window */
	index = ba_resp_scd_ssn & (txq->q.n_bd - 1);

	spin_lock_irqsave(&priv->shrd->sta_lock, flags);
	spin_lock_irqsave(&priv->shrd->sta_lock, flags);


	if (unlikely(agg->txq_id != scd_flow)) {
	if (unlikely(agg->txq_id != scd_flow)) {
+0 −42
Original line number Original line Diff line number Diff line
@@ -574,19 +574,6 @@ struct iwl_sensitivity_ranges {
 ****************************************************************************/
 ****************************************************************************/
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern void iwl_update_chain_flags(struct iwl_priv *priv);
extern const u8 iwl_bcast_addr[ETH_ALEN];
extern const u8 iwl_bcast_addr[ETH_ALEN];
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}


static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}


#define IWL_OPERATION_MODE_AUTO     0
#define IWL_OPERATION_MODE_AUTO     0
#define IWL_OPERATION_MODE_HT_ONLY  1
#define IWL_OPERATION_MODE_HT_ONLY  1
@@ -1156,10 +1143,6 @@ struct iwl_priv {


	int activity_timer_active;
	int activity_timer_active;


	/* Tx DMA processing queues */
	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;

	/* counts mgmt, ctl, and data packets */
	/* counts mgmt, ctl, and data packets */
	struct traffic_stats tx_stats;
	struct traffic_stats tx_stats;
	struct traffic_stats rx_stats;
	struct traffic_stats rx_stats;
@@ -1172,12 +1155,6 @@ struct iwl_priv {
	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
	unsigned long ucode_key_table;
	unsigned long ucode_key_table;


	/* queue refcounts */
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	/* for each AC */
	atomic_t queue_stop_count[4];

	/* Indication if ieee80211_ops->open has been called */
	/* Indication if ieee80211_ops->open has been called */
	u8 is_open;
	u8 is_open;


@@ -1334,27 +1311,8 @@ struct iwl_priv {
	bool have_rekey_data;
	bool have_rekey_data;
}; /*iwl_priv */
}; /*iwl_priv */


static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}

static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}

extern struct iwl_mod_params iwlagn_mod_params;
extern struct iwl_mod_params iwlagn_mod_params;


static inline struct ieee80211_hdr *iwl_tx_queue_get_hdr(struct iwl_priv *priv,
							 int txq_id, int idx)
{
	if (priv->txq[txq_id].skbs[idx])
		return (struct ieee80211_hdr *)priv->txq[txq_id].
				skbs[idx]->data;
	return NULL;
}

static inline struct iwl_rxon_context *
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
{
+46 −4
Original line number Original line Diff line number Diff line
@@ -125,6 +125,10 @@ struct iwl_dma_ptr {
 * @ac_to_fifo: to what fifo is a specific AC mapped ?
 * @ac_to_fifo: to what fifo is a specific AC mapped ?
 * @ac_to_queue: to what tx queue  is a specific AC mapped ?
 * @ac_to_queue: to what tx queue  is a specific AC mapped ?
 * @mcast_queue:
 * @mcast_queue:
 * @txq: Tx DMA processing queues
 * @txq_ctx_active_msk: what queue is active
 * @queue_stopped: tracks what queue is stopped
 * @queue_stop_count: tracks what SW queue is stopped
 */
 */
struct iwl_trans_pcie {
struct iwl_trans_pcie {
	struct iwl_rx_queue rxq;
	struct iwl_rx_queue rxq;
@@ -150,6 +154,12 @@ struct iwl_trans_pcie {
	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];
	u8 mcast_queue[NUM_IWL_RXON_CTX];

	struct iwl_tx_queue *txq;
	unsigned long txq_ctx_active_msk;
#define IWL_MAX_HW_QUEUES	32
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	atomic_t queue_stop_count[4];
};
};


#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -207,6 +217,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
	int index);
	int index);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs);
			 struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);


/*****************************************************
/*****************************************************
* Error handling
* Error handling
@@ -216,6 +227,9 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
void iwl_dump_csr(struct iwl_trans *trans);
void iwl_dump_csr(struct iwl_trans *trans);


/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
{
	clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
	clear_bit(STATUS_INT_ENABLED, &trans->shrd->status);
@@ -265,12 +279,14 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
	u8 queue = txq->swq_id;
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);


	if (unlikely(!trans->shrd->mac80211_registered))
	if (unlikely(!trans->shrd->mac80211_registered))
		return;
		return;


	if (test_and_clear_bit(hwq, priv(trans)->queue_stopped))
	if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
		if (atomic_dec_return(&priv(trans)->queue_stop_count[ac]) <= 0)
		if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
			ieee80211_wake_queue(trans->shrd->hw, ac);
			ieee80211_wake_queue(trans->shrd->hw, ac);
}
}


@@ -280,12 +296,14 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
	u8 queue = txq->swq_id;
	u8 queue = txq->swq_id;
	u8 ac = queue & 3;
	u8 ac = queue & 3;
	u8 hwq = (queue >> 2) & 0x1f;
	u8 hwq = (queue >> 2) & 0x1f;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);


	if (unlikely(!trans->shrd->mac80211_registered))
	if (unlikely(!trans->shrd->mac80211_registered))
		return;
		return;


	if (!test_and_set_bit(hwq, priv(trans)->queue_stopped))
	if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
		if (atomic_inc_return(&priv(trans)->queue_stop_count[ac]) > 0)
		if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
			ieee80211_stop_queue(trans->shrd->hw, ac);
			ieee80211_stop_queue(trans->shrd->hw, ac);
}
}


@@ -301,4 +319,28 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,


#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue


/* Mark TX queue txq_id as active in the transport's queue-context bitmask. */
static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
					int txq_id)
{
	set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

/* Mark TX queue txq_id as inactive in the transport's queue-context bitmask. */
static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie,
					  int txq_id)
{
	clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
}

/*
 * Return nonzero if slot index i lies within the in-use region of the
 * circular queue, i.e. between read_ptr (inclusive) and write_ptr
 * (exclusive).  The second branch handles the wrapped case where
 * write_ptr has cycled past the end and is now below read_ptr.
 */
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

/*
 * Reduce a sequence index to a slot within the command window.
 * NOTE(review): the mask is only equivalent to "index % n_window"
 * when n_window is a power of two — presumably guaranteed by the
 * queue setup code; confirm at the allocation site.
 */
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

#endif /* __iwl_trans_int_pcie_h__ */
#endif /* __iwl_trans_int_pcie_h__ */
+1 −1
Original line number Original line Diff line number Diff line
@@ -1032,7 +1032,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
		for (i = 0; i < hw_params(trans).max_txq_num; i++)
		for (i = 0; i < hw_params(trans).max_txq_num; i++)
			iwl_txq_update_write_ptr(trans,
			iwl_txq_update_write_ptr(trans,
						 &priv(trans)->txq[i]);
						 &trans_pcie->txq[i]);


		isr_stats->wakeup++;
		isr_stats->wakeup++;


+32 −21
Original line number Original line Diff line number Diff line
@@ -407,9 +407,10 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
					struct iwl_tx_queue *txq,
					struct iwl_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
					int tx_fifo_id, int scd_retry)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id = txq->q.id;
	int txq_id = txq->q.id;
	int active =
	int active =
		test_bit(txq_id, &priv(trans)->txq_ctx_active_msk) ? 1 : 0;
		test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;


	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
	iwl_write_prph(bus(trans), SCD_QUEUE_STATUS_BITS(txq_id),
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -482,8 +483,8 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,


	/* Place first TFD at index corresponding to start sequence number.
	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv(trans)->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv(trans)->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);


	/* Set up Tx window size and frame limit for this queue */
	/* Set up Tx window size and frame limit for this queue */
@@ -500,11 +501,11 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));


	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id],
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
					tx_fifo, 1);
					tx_fifo, 1);


	priv(trans)->txq[txq_id].sta_id = sta_id;
	trans_pcie->txq[txq_id].sta_id = sta_id;
	priv(trans)->txq[txq_id].tid = tid;
	trans_pcie->txq[txq_id].tid = tid;


	spin_unlock_irqrestore(&trans->shrd->lock, flags);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);
}
}
@@ -517,11 +518,12 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
 */
 */
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;
	int txq_id;


	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		if (!test_and_set_bit(txq_id,
		if (!test_and_set_bit(txq_id,
					&priv(trans)->txq_ctx_active_msk))
					&trans_pcie->txq_ctx_active_msk))
			return txq_id;
			return txq_id;
	return -1;
	return -1;
}
}
@@ -530,6 +532,7 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
				enum iwl_rxon_context_id ctx, int sta_id,
				enum iwl_rxon_context_id ctx, int sta_id,
				int tid, u16 *ssn)
				int tid, u16 *ssn)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tid_data *tid_data;
	struct iwl_tid_data *tid_data;
	unsigned long flags;
	unsigned long flags;
	u16 txq_id;
	u16 txq_id;
@@ -545,7 +548,7 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	*ssn = SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;
	tid_data->agg.txq_id = txq_id;
	iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);


	tid_data = &trans->shrd->tid_data[sta_id][tid];
	tid_data = &trans->shrd->tid_data[sta_id][tid];
	if (tid_data->tfds_in_queue == 0) {
	if (tid_data->tfds_in_queue == 0) {
@@ -564,24 +567,26 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,


void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
	iwlagn_tx_queue_stop_scheduler(trans, txq_id);


	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));


	priv(trans)->txq[txq_id].q.read_ptr = 0;
	trans_pcie->txq[txq_id].q.read_ptr = 0;
	priv(trans)->txq[txq_id].q.write_ptr = 0;
	trans_pcie->txq[txq_id].q.write_ptr = 0;
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
	iwl_trans_set_wr_ptrs(trans, txq_id, 0);


	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv(trans), txq_id);
	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
	iwl_trans_tx_queue_set_status(trans, &priv(trans)->txq[txq_id], 0, 0);
	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
}
}


int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  enum iwl_rxon_context_id ctx, int sta_id,
				  int tid)
				  int tid)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	unsigned long flags;
	int read_ptr, write_ptr;
	int read_ptr, write_ptr;
	struct iwl_tid_data *tid_data;
	struct iwl_tid_data *tid_data;
@@ -621,8 +626,8 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
				"or starting\n");
				"or starting\n");
	}
	}


	write_ptr = priv(trans)->txq[txq_id].q.write_ptr;
	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
	read_ptr = priv(trans)->txq[txq_id].q.read_ptr;
	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;


	/* The queue is not empty */
	/* The queue is not empty */
	if (write_ptr != read_ptr) {
	if (write_ptr != read_ptr) {
@@ -663,7 +668,8 @@ turn_off:
 */
 */
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[trans->shrd->cmd_queue];
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	struct iwl_queue *q = &txq->q;
	struct iwl_queue *q = &txq->q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_cmd_meta *out_meta;
@@ -852,7 +858,9 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 */
 */
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
{
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans(priv));
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_queue *q = &txq->q;
	int nfreed = 0;
	int nfreed = 0;


@@ -893,7 +901,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_cmd_meta *meta;
	struct iwl_trans *trans = trans(priv);
	struct iwl_trans *trans = trans(priv);
	struct iwl_tx_queue *txq = &priv->txq[trans->shrd->cmd_queue];
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
	unsigned long flags;
	unsigned long flags;


	/* If a Tx command is being handled and it isn't in the actual
	/* If a Tx command is being handled and it isn't in the actual
@@ -902,8 +911,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	if (WARN(txq_id != trans->shrd->cmd_queue,
	if (WARN(txq_id != trans->shrd->cmd_queue,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		  txq_id, trans->shrd->cmd_queue, sequence,
		  txq_id, trans->shrd->cmd_queue, sequence,
		  priv->txq[trans->shrd->cmd_queue].q.read_ptr,
		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
		  priv->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
		iwl_print_hex_error(priv, pkt, 32);
		iwl_print_hex_error(priv, pkt, 32);
		return;
		return;
	}
	}
@@ -1072,6 +1081,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)


static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int cmd_idx;
	int ret;
	int ret;


@@ -1144,7 +1154,7 @@ cancel:
		 * in later, it will possibly set an invalid
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 * address (cmd->meta.source).
		 */
		 */
		priv(trans)->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
							~CMD_WANT_SKB;
							~CMD_WANT_SKB;
	}
	}
fail:
fail:
@@ -1181,7 +1191,8 @@ int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
			 struct sk_buff_head *skbs)
			 struct sk_buff_head *skbs)
{
{
	struct iwl_tx_queue *txq = &priv(trans)->txq[txq_id];
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_queue *q = &txq->q;
	int last_to_free;
	int last_to_free;
	int freed = 0;
	int freed = 0;
Loading