
Commit f1bc4ac6 authored by Zhu Yi, committed by John W. Linville

iwlwifi: use GFP_KERNEL to allocate Rx SKB memory



Previously we allocated Rx SKBs with the GFP_ATOMIC flag, because the allocation
happened while holding the spinlock that protects the rx_used and rx_free lists
in the rxq.

	spin_lock();
	...
	element = rxq->rx_used.next;
	element->skb = alloc_skb(..., GFP_ATOMIC);
	list_del(element);
	list_add_tail(&element->list, &rxq->rx_free);
	...
	spin_unlock();

After splitting the rx_used removal and the rx_free insertion into two separate
critical sections, we no longer require the skb allocation to happen in atomic
context (the function itself is scheduled from a workqueue).

	spin_lock();
	...
	element = rxq->rx_used.next;
	list_del(element);
	...
	spin_unlock();
	...
	element->skb = alloc_skb(..., GFP_KERNEL);
	...
	spin_lock()
	...
	list_add_tail(&element->list, &rxq->rx_free);
	...
	spin_unlock();
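
As an illustration only (not part of the patch), the sketch below shows the same
"unlink under the lock, allocate with the lock dropped, relink under the lock"
flow as a minimal user-space C program. It uses a pthread mutex in place of
rxq->lock and malloc() in place of alloc_skb(); the names (struct buf, pool_lock,
rx_allocate()) are hypothetical, and the error path is simplified compared to
the driver.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical user-space stand-ins for the rxq lists and rxq->lock. */
	struct buf {
		struct buf *next;
		void *data;
	};

	static struct buf *rx_used;	/* buffers waiting for memory */
	static struct buf *rx_free;	/* buffers ready for reuse    */
	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

	static void rx_allocate(size_t buf_size)
	{
		for (;;) {
			struct buf *b;

			/* Hold the lock only to unlink one element from
			 * rx_used (the list_del() step above). */
			pthread_mutex_lock(&pool_lock);
			b = rx_used;
			if (!b) {
				pthread_mutex_unlock(&pool_lock);
				return;
			}
			rx_used = b->next;
			pthread_mutex_unlock(&pool_lock);

			/* Allocate with the lock dropped; blocking here is
			 * fine, the user-space counterpart of GFP_KERNEL. */
			b->data = malloc(buf_size);
			if (!b->data) {
				/* Sketch-only choice: put the element back. */
				pthread_mutex_lock(&pool_lock);
				b->next = rx_used;
				rx_used = b;
				pthread_mutex_unlock(&pool_lock);
				return;
			}

			/* Re-take the lock only to insert into rx_free
			 * (the list_add_tail() step above). */
			pthread_mutex_lock(&pool_lock);
			b->next = rx_free;
			rx_free = b;
			pthread_mutex_unlock(&pool_lock);
		}
	}

	int main(void)
	{
		struct buf bufs[4] = { { NULL, NULL } };
		struct buf *p;
		int i, n = 0;

		/* Seed rx_used with a few empty elements, then refill them. */
		for (i = 0; i < 4; i++) {
			bufs[i].next = rx_used;
			rx_used = &bufs[i];
		}
		rx_allocate(2048);

		for (p = rx_free; p; p = p->next)
			n++;
		printf("%d buffers moved to rx_free\n", n);
		return 0;
	}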

This patch should fix the "iwlagn: Can not allocate SKB buffers" warning we
have been seeing recently.

Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Acked-by: Tomas Winkler <tomas.winkler@intel.com>
Cc: stable@kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 4087f6f6
+1 −11
@@ -1110,16 +1110,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
 	priv->cfg->ops->lib->rx_handler_setup(priv);
 }
 
-/*
- * this should be called while priv->lock is locked
-*/
-static void __iwl_rx_replenish(struct iwl_priv *priv)
-{
-	iwl_rx_allocate(priv);
-	iwl_rx_queue_restock(priv);
-}
-
-
 /**
  * iwl_rx_handle - Main entry function for receiving responses from uCode
  *
@@ -1228,7 +1218,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
 			count++;
 			if (count >= 8) {
 				priv->rxq.read = i;
-				__iwl_rx_replenish(priv);
+				iwl_rx_queue_restock(priv);
 				count = 0;
 			}
 		}
+19 −10
@@ -244,25 +244,31 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
 	unsigned long flags;
-	spin_lock_irqsave(&rxq->lock, flags);
-	while (!list_empty(&rxq->rx_used)) {
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
 		element = rxq->rx_used.next;
 		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+		list_del(element);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
 
 		/* Alloc a new receive buffer */
 		rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				__GFP_NOWARN | GFP_ATOMIC);
+				     GFP_KERNEL);
 		if (!rxb->skb) {
-			if (net_ratelimit())
-				printk(KERN_CRIT DRV_NAME
-				       ": Can not allocate SKB buffers\n");
+			printk(KERN_CRIT DRV_NAME
+				   "Can not allocate SKB buffers\n");
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
 			break;
 		}
-		priv->alloc_rxb_skb++;
-		list_del(element);
 
 		/* Get physical address of RB/SKB */
 		rxb->real_dma_addr = pci_map_single(
@@ -276,12 +282,15 @@ void iwl_rx_allocate(struct iwl_priv *priv)
 		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
 		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
 
+		spin_lock_irqsave(&rxq->lock, flags);
+
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
-	}
-	spin_unlock_irqrestore(&rxq->lock, flags);
+		priv->alloc_rxb_skb++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
 }
-EXPORT_SYMBOL(iwl_rx_allocate);
 
 void iwl_rx_replenish(struct iwl_priv *priv)
 {