
Commit 96ebbe8d authored by Stanislaw Gruszka, committed by John W. Linville

iwlegacy: check for dma mapping errors



Handle pci_map_page() errors. This fixes the "DMA-API: device driver failed
to check map error" warning.

Reported-by: Zdenek Kabelac <zkabelac@redhat.com>
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 58b27101
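
The pattern applied here is the standard one for streaming DMA mappings: every address returned by pci_map_page() must be checked with pci_dma_mapping_error() before the buffer is handed to hardware, and each error path must release whatever was already set up (including pci_unmap_page() on the branch where the mapping succeeded but the rx_used list turned out to be empty). A minimal sketch of the check in isolation (rx_buf_setup() and its signature are illustrative, not code from this commit):

	#include <linux/gfp.h>
	#include <linux/pci.h>

	/* Map a freshly allocated page for device-to-CPU DMA and verify
	 * the mapping before hardware is allowed to use it. */
	static int rx_buf_setup(struct pci_dev *pdev, struct page *page,
				unsigned int order, dma_addr_t *dma)
	{
		*dma = pci_map_page(pdev, page, 0, PAGE_SIZE << order,
				    PCI_DMA_FROMDEVICE);

		/* pci_map_page() can fail (e.g. when IOMMU space is
		 * exhausted); using the address unchecked is what triggers
		 * the DMA-API debug warning quoted above. */
		if (pci_dma_mapping_error(pdev, *dma)) {
			__free_pages(page, order);
			return -ENOMEM;
		}
		return 0;
	}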
drivers/net/wireless/iwlegacy/3945-mac.c  +27 −12
@@ -1001,12 +1001,12 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
 	struct list_head *element;
 	struct il_rx_buf *rxb;
 	struct page *page;
+	dma_addr_t page_dma;
 	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
 	while (1) {
 		spin_lock_irqsave(&rxq->lock, flags);
-
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
 			return;
@@ -1035,26 +1035,34 @@ il3945_rx_allocate(struct il_priv *il, gfp_t priority)
 			break;
 		}
 
+		/* Get physical address of RB/SKB */
+		page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+			__free_pages(page, il->hw_params.rx_page_order);
+			break;
+		}
+
 		spin_lock_irqsave(&rxq->lock, flags);
+
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
+			pci_unmap_page(il->pci_dev, page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
 			__free_pages(page, il->hw_params.rx_page_order);
 			return;
 		}
+
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct il_rx_buf, list);
 		list_del(element);
-		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		rxb->page = page;
-		/* Get physical address of RB/SKB */
-		rxb->page_dma =
-		    pci_map_page(il->pci_dev, page, 0,
-				 PAGE_SIZE << il->hw_params.rx_page_order,
-				 PCI_DMA_FROMDEVICE);
-
-		spin_lock_irqsave(&rxq->lock, flags);
-
+		rxb->page_dma = page_dma;
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 		il->alloc_rxb_page++;
@@ -1284,8 +1292,15 @@ il3945_rx_handle(struct il_priv *il)
 			    pci_map_page(il->pci_dev, rxb->page, 0,
 					 PAGE_SIZE << il->hw_params.
 					 rx_page_order, PCI_DMA_FROMDEVICE);
-			list_add_tail(&rxb->list, &rxq->rx_free);
-			rxq->free_count++;
+			if (unlikely(pci_dma_mapping_error(il->pci_dev,
+							   rxb->page_dma))) {
+				__il_free_pages(il, rxb->page);
+				rxb->page = NULL;
+				list_add_tail(&rxb->list, &rxq->rx_used);
+			} else {
+				list_add_tail(&rxb->list, &rxq->rx_free);
+				rxq->free_count++;
+			}
 		} else
 			list_add_tail(&rxb->list, &rxq->rx_used);
 
drivers/net/wireless/iwlegacy/4965-mac.c  +27 −16
@@ -319,6 +319,7 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
 	struct list_head *element;
 	struct il_rx_buf *rxb;
 	struct page *page;
+	dma_addr_t page_dma;
 	unsigned long flags;
 	gfp_t gfp_mask = priority;
 
@@ -356,33 +357,35 @@ il4965_rx_allocate(struct il_priv *il, gfp_t priority)
 			return;
 		}
 
+		/* Get physical address of the RB */
+		page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+			__free_pages(page, il->hw_params.rx_page_order);
+			break;
+		}
+
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
+			pci_unmap_page(il->pci_dev, page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
 			__free_pages(page, il->hw_params.rx_page_order);
 			return;
 		}
+
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct il_rx_buf, list);
 		list_del(element);
 
-		spin_unlock_irqrestore(&rxq->lock, flags);
-
 		BUG_ON(rxb->page);
-		rxb->page = page;
-		/* Get physical address of the RB */
-		rxb->page_dma =
-		    pci_map_page(il->pci_dev, page, 0,
-				 PAGE_SIZE << il->hw_params.rx_page_order,
-				 PCI_DMA_FROMDEVICE);
-		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-		/* and also 256 byte aligned! */
-		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
-		spin_lock_irqsave(&rxq->lock, flags);
 
+		rxb->page = page;
+		rxb->page_dma = page_dma;
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
 		il->alloc_rxb_page++;
@@ -4292,8 +4295,16 @@ il4965_rx_handle(struct il_priv *il)
 			    pci_map_page(il->pci_dev, rxb->page, 0,
 					 PAGE_SIZE << il->hw_params.
 					 rx_page_order, PCI_DMA_FROMDEVICE);
-			list_add_tail(&rxb->list, &rxq->rx_free);
-			rxq->free_count++;
+
+			if (unlikely(pci_dma_mapping_error(il->pci_dev,
+							   rxb->page_dma))) {
+				__il_free_pages(il, rxb->page);
+				rxb->page = NULL;
+				list_add_tail(&rxb->list, &rxq->rx_used);
+			} else {
+				list_add_tail(&rxb->list, &rxq->rx_free);
+				rxq->free_count++;
+			}
 		} else
 			list_add_tail(&rxb->list, &rxq->rx_used);
 