
Commit df833b1d authored by Reinette Chatre, committed by John W. Linville

iwlwifi: DMA fixes

A few DMA-related issues were uncovered when using the driver with swiotlb.
- the driver should not touch memory after it has been mapped for DMA
- iwl3945's RX queue management cannot reuse all of the iwlagn code because
  the RX buffer size is different. Revert to iwl3945-specific routines that
  map/unmap memory.
- there is no need to call dma_sync_single_range_for_cpu followed by
  pci_unmap_single; we can just call pci_unmap_single up front
- map only the memory area that will actually be used by the device. This is
  especially relevant to the mapping of iwl_cmd: we must not map the entire
  structure, because the metadata at the beginning of the structure holds the
  address used later for unmapping. Storing the unmap address inside mapped
  data creates a problem (see the sketch after this list).
- ensure that _if_ memory needs to be modified after it is mapped, we first
  call _sync_single_for_cpu and then release it back to the device with
  _sync_single_for_device
- we mapped the wrong length of data for host commands: the mapped length
  differed from the length provided to the device. Fix that.
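
The rules above can be summarized in a short, illustrative sketch. This is not
code from the patch: struct my_cmd and my_queue_cmd are hypothetical, and the
sketch only assumes the generic PCI streaming-DMA helpers already used by this
driver. It shows mapping only the region the device reads, keeping the unmap
bookkeeping outside the mapped area, and bracketing a late CPU write with the
sync_for_cpu/sync_for_device pair.

#include <linux/pci.h>

/* Hypothetical descriptor: the leading bookkeeping is CPU-only and must
 * stay outside the DMA mapping, much like iwl_cmd's meta data. */
struct my_cmd {
	dma_addr_t mapping;		/* where to unmap from later */
	size_t	   map_len;		/* how much to unmap later */
	u8	   payload[128];	/* the part the device actually reads */
};

static int my_queue_cmd(struct pci_dev *pdev, struct my_cmd *cmd)
{
	size_t len = sizeof(cmd->payload);
	dma_addr_t phys;

	/* Map only the area the device will use, not the whole struct,
	 * so 'mapping'/'map_len' below remain in unmapped memory. */
	phys = pci_map_single(pdev, cmd->payload, len, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(pdev, phys))
		return -ENOMEM;
	cmd->mapping = phys;
	cmd->map_len = len;

	/* If the CPU must still modify the mapped area, take ownership
	 * back first and hand it to the device again afterwards. */
	pci_dma_sync_single_for_cpu(pdev, phys, len, PCI_DMA_BIDIRECTIONAL);
	cmd->payload[0] = 0xab;			/* late CPU update */
	pci_dma_sync_single_for_device(pdev, phys, len, PCI_DMA_BIDIRECTIONAL);

	/* ... program 'phys' and 'len' into the hardware descriptor ... */
	return 0;
}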

Thanks to Jason Andryuk <jandryuk@gmail.com> for significant bisecting
help in tracking down these issues.

This fixes http://www.intellinuxwireless.org/bugzilla/show_bug.cgi?id=1964



Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Tested-by: Jason Andryuk <jandryuk@gmail.com>
Tested-by: Ben Gamari <bgamari@gmail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent d2ee9cd2
+1 −1
@@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
 			return -ENOMEM;
 		}
 	} else
-		iwl_rx_queue_reset(priv, rxq);
+		iwl3945_rx_queue_reset(priv, rxq);
 
 	iwl3945_rx_replenish(priv);
 
+1 −0
@@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
 extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
 			     struct iwl_tx_queue *txq, int count, u32 id);
 extern void iwl3945_rx_replenish(void *data);
+extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
 extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
 			    const void *data);
+3 −8
@@ -976,10 +976,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
 
 		rxq->queue[i] = NULL;
 
-		dma_sync_single_range_for_cpu(
-				&priv->pci_dev->dev, rxb->real_dma_addr,
-				rxb->aligned_dma_addr - rxb->real_dma_addr,
-				priv->hw_params.rx_buf_size,
-				PCI_DMA_FROMDEVICE);
+		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+				 priv->hw_params.rx_buf_size + 256,
+				 PCI_DMA_FROMDEVICE);
 		pkt = (struct iwl_rx_packet *)rxb->skb->data;
 
@@ -1031,9 +1029,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
 			rxb->skb = NULL;
 		}
 
-		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
-				 priv->hw_params.rx_buf_size + 256,
-				 PCI_DMA_FROMDEVICE);
 		spin_lock_irqsave(&rxq->lock, flags);
 		list_add_tail(&rxb->list, &priv->rxq.rx_used);
 		spin_unlock_irqrestore(&rxq->lock, flags);
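
The hunk above applies two of the commit-message rules to the RX path: the
buffer is unmapped in a single step before the CPU parses it, instead of being
synced for the CPU and unmapped again later. A minimal sketch of that pattern,
using a hypothetical helper rather than the driver's own code:

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical consumer: pci_unmap_single() hands buffer ownership back to
 * the CPU, so no dma_sync_single_range_for_cpu() is needed first, and the
 * packet can be read safely even when swiotlb bounce buffers are in use. */
static void my_rx_consume(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t dma, size_t buf_len)
{
	pci_unmap_single(pdev, dma, buf_len, PCI_DMA_FROMDEVICE);

	/* Only now is it safe to look at the received data. */
	pr_debug("rx packet, first byte 0x%02x\n", skb->data[0]);
}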
+4 −0
@@ -360,12 +360,16 @@ struct iwl_host_cmd {
 
 /**
  * struct iwl_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
  * @rx_free: list of free SKBs for use
  * @rx_used: List of Rx buffers with no SKB
  * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
+53 −42
@@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Copy MAC header from skb into command buffer */
 	memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
+
+	/* set is_hcca to 0; it probably will never be implemented */
+	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
+
+	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
+
 	/*
 	 * Use the first empty entry in this queue's command buffer array
 	 * to contain the Tx command and MAC header concatenated together
@@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	else
 		len_org = 0;
 
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (len_org)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
 	txcmd_phys = pci_map_single(priv->pci_dev,
-				    out_cmd, sizeof(struct iwl_cmd),
+				    &out_cmd->hdr, len,
 				    PCI_DMA_BIDIRECTIONAL);
 	pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
-	pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
+	pci_unmap_len_set(&out_cmd->meta, len, len);
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
-	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 						   txcmd_phys, len, 1, 0);
 
-	if (info->control.hw_key)
-		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+		if (qc)
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
 
 	/* Set up TFD's 2nd entry to point directly to remainder of skb,
 	 * if any (802.11 null frames have no payload). */
@@ -846,35 +871,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 							   0, 0);
 	}
 
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (len_org)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx_cmd->len = cpu_to_le16(len);
-	/* TODO need this for burst mode later on */
-	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
-
-	/* set is_hcca to 0; it probably will never be implemented */
-	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
-
-	iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
-
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 				offsetof(struct iwl_tx_cmd, scratch);
+
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
+				    len, PCI_DMA_BIDIRECTIONAL);
 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
 
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-		if (qc)
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
 	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
 		     le16_to_cpu(out_cmd->hdr.sequence));
 	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
@@ -882,7 +889,11 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
 	/* Set up entry for this TFD in Tx byte-count array */
-	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
+	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
+						     le16_to_cpu(tx_cmd->len));
+
+	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
+				       len, PCI_DMA_BIDIRECTIONAL);
 
 	/* Tell device the write index *just past* this latest filled TFD */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -970,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			INDEX_TO_SEQ(q->write_ptr));
 	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-	len = (idx == TFD_CMD_SLOTS) ?
-			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+	len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
+	len += (idx == TFD_CMD_SLOTS) ?  IWL_MAX_SCAN_SIZE : 0;
 
-	phys_addr = pci_map_single(priv->pci_dev, out_cmd,
-				   len, PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
-	pci_unmap_len_set(&out_cmd->meta, len, len);
-	phys_addr += offsetof(struct iwl_cmd, hdr);
-
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   phys_addr, fix_size, 1,
-						   U32_PAD(cmd->len));
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	switch (out_cmd->hdr.cmd) {
@@ -1009,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		/* Set up entry in queue's byte count circular buffer */
 		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
 
+	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
+				   fix_size, PCI_DMA_BIDIRECTIONAL);
+	pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
+	pci_unmap_len_set(&out_cmd->meta, len, fix_size);
+
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   phys_addr, fix_size, 1,
+						   U32_PAD(cmd->len));
+
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	ret = iwl_txq_update_write_ptr(priv, txq);
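
The host-command hunks above map from &out_cmd->hdr after all fields are
filled in and record the unmap address and length with pci_unmap_addr_set()/
pci_unmap_len_set() in the unmapped meta area. A sketch of the matching
completion side, with hypothetical names, showing why that bookkeeping has to
live outside the mapped region:

#include <linux/pci.h>

/* Hypothetical per-command bookkeeping, kept outside the mapped region
 * (as iwl_cmd keeps its meta data ahead of the mapped header). */
struct my_cmd_meta {
	u32 flags;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	DECLARE_PCI_UNMAP_LEN(len)
};

static void my_cmd_complete(struct pci_dev *pdev, struct my_cmd_meta *meta)
{
	/* The stored address/length are read back here; keeping them outside
	 * the mapped area means writing them after pci_map_single() did not
	 * violate the DMA ownership rules (which matters with swiotlb). */
	pci_unmap_single(pdev,
			 pci_unmap_addr(meta, mapping),
			 pci_unmap_len(meta, len),
			 PCI_DMA_BIDIRECTIONAL);
}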