
Commit f86af2df authored by Manish Chopra, committed by David S. Miller

qede: Fix various memory allocation error flows for fastpath



This patch makes the driver handle fastpath memory allocation
failures gracefully.

Signed-off-by: Manish Chopra <manish.chopra@qlogic.com>
Signed-off-by: Yuval Mintz <yuval.mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5bec11cf
+85 −55
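A note on the core pattern before the diff: instead of dropping (or leaking) DMA-mapped buffers when an SKB or page allocation fails, the patch consumes the packet's buffer descriptors and immediately re-posts them at the producer index, so the RX ring never runs dry under memory pressure. The standalone sketch below models that consume-and-recycle flow; the types and helpers are simplified stand-ins for the driver's sw_rx_data/qede_rx_queue structures, not the real kernel API.

/*
 * Standalone model of the recycle-on-failure pattern this patch
 * introduces (qede_rx_bd_ring_consume() + qede_recycle_rx_bd_ring()).
 * Everything here is an illustrative stand-in, not the driver's code.
 */
#include <stdio.h>

#define NUM_RX_BDS	8
#define NUM_RX_BDS_MAX	(NUM_RX_BDS - 1)

struct rx_buf {
	void *data;			/* stands in for the mapped page */
};

struct rx_queue {
	struct rx_buf sw_rx_ring[NUM_RX_BDS];
	unsigned int sw_rx_cons;	/* free-running consumer index */
	unsigned int sw_rx_prod;	/* free-running producer index */
};

/* Mirrors qede_rx_bd_ring_consume(): advance the consumer by one BD. */
static void rx_bd_ring_consume(struct rx_queue *rxq)
{
	rxq->sw_rx_cons++;
}

/* Mirrors qede_reuse_page(): re-post the consumed buffer at the
 * producer index so the hardware never runs out of receive buffers.
 */
static void reuse_buf(struct rx_queue *rxq, struct rx_buf *curr_cons)
{
	struct rx_buf *curr_prod =
		&rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];

	*curr_prod = *curr_cons;
	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* Mirrors qede_recycle_rx_bd_ring(): when an allocation fails
 * mid-packet, recycle all 'count' BDs of that packet from consumer
 * to producer instead of leaving the ring short of buffers.
 */
static void recycle_rx_bd_ring(struct rx_queue *rxq, unsigned int count)
{
	struct rx_buf *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		reuse_buf(rxq, curr_cons);
		rx_bd_ring_consume(rxq);
	}
}

int main(void)
{
	static char pages[4][64];	/* fake buffer payloads */
	struct rx_queue rxq = { .sw_rx_cons = 0, .sw_rx_prod = 4 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		rxq.sw_rx_ring[i].data = pages[i];

	/* Pretend SKB allocation failed for a 3-BD jumbo packet: its
	 * buffers go back on the ring rather than being dropped.
	 */
	recycle_rx_bd_ring(&rxq, 3);
	printf("cons=%u prod=%u\n", rxq.sw_rx_cons, rxq.sw_rx_prod);
	return 0;
}

The atomic_inc(&...->_count) hunks below are the companion to this: a buffer that will be recycled must keep its page reference alive, so its page refcount is bumped before the half-built SKB (which also references the page) is freed.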
@@ -750,6 +750,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp)
 	return false;
 }
 
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+	qed_chain_consume(&rxq->rx_bd_ring);
+	rxq->sw_rx_cons++;
+}
+
 /* This function reuses the buffer(from an offset) from
  * consumer index to producer index in the bd ring
  */
@@ -773,6 +779,21 @@ static inline void qede_reuse_page(struct qede_dev *edev,
 	curr_cons->data = NULL;
 }
 
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+				    struct qede_dev *edev, u8 count)
+{
+	struct sw_rx_data *curr_cons;
+
+	for (; count > 0; count--) {
+		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+		qede_reuse_page(edev, rxq, curr_cons);
+		qede_rx_bd_ring_consume(rxq);
+	}
+}
+
 static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 					 struct qede_rx_queue *rxq,
 					 struct sw_rx_data *curr_cons)
@@ -781,8 +802,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
 	curr_cons->page_offset += rxq->rx_buf_seg_size;
 
 	if (curr_cons->page_offset == PAGE_SIZE) {
-		if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+		if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+			/* Since we failed to allocate new buffer
+			 * current buffer can be used again.
+			 */
+			curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
 			return -ENOMEM;
+		}
 
 		dma_unmap_page(&edev->pdev->dev, curr_cons->mapping,
 			       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -901,7 +928,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 			   len_on_bd);
 
 	if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) {
-		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+		/* Incr page ref count to reuse on allocation failure
+		 * so that it doesn't get freed while freeing SKB.
+		 */
+		atomic_inc(&current_bd->data->_count);
 		goto out;
 	}
 
@@ -915,6 +945,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
 	return 0;
 
 out:
+	tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
+	qede_recycle_rx_bd_ring(rxq, edev, 1);
 	return -ENOMEM;
 }
 
@@ -966,8 +998,9 @@ static void qede_tpa_start(struct qede_dev *edev,
 	tpa_info->skb = netdev_alloc_skb(edev->ndev,
 					 le16_to_cpu(cqe->len_on_first_bd));
 	if (unlikely(!tpa_info->skb)) {
+		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
 		tpa_info->agg_state = QEDE_AGG_STATE_ERROR;
-		return;
+		goto cons_buf;
 	}
 
 	skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
@@ -990,6 +1023,7 @@ static void qede_tpa_start(struct qede_dev *edev,
 	/* This is needed in order to enable forwarding support */
 	qede_set_gro_params(edev, tpa_info->skb, cqe);
 
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
 	if (likely(cqe->ext_bd_len_list[0]))
 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
@@ -1244,17 +1278,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				  "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
 				  sw_comp_cons, parse_flag);
 			rxq->rx_hw_errors++;
-			qede_reuse_page(edev, rxq, sw_rx_data);
-			goto next_rx;
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
+			goto next_cqe;
 		}
 
 		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
 		if (unlikely(!skb)) {
 			DP_NOTICE(edev,
 				  "Build_skb failed, dropping incoming packet\n");
-			qede_reuse_page(edev, rxq, sw_rx_data);
+			qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num);
 			rxq->rx_alloc_errors++;
-			goto next_rx;
+			goto next_cqe;
 		}
 
 		/* Copy data into SKB */
@@ -1288,11 +1322,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			if (unlikely(qede_realloc_rx_buffer(edev, rxq,
 							    sw_rx_data))) {
 				DP_ERR(edev, "Failed to allocate rx buffer\n");
+				/* Incr page ref count to reuse on allocation
+				 * failure so that it doesn't get freed while
+				 * freeing SKB.
+				 */
+
+				atomic_inc(&sw_rx_data->data->_count);
 				rxq->rx_alloc_errors++;
+				qede_recycle_rx_bd_ring(rxq, edev,
+							fp_cqe->bd_num);
+				dev_kfree_skb_any(skb);
 				goto next_cqe;
 			}
 		}
 
+		qede_rx_bd_ring_consume(rxq);
+
 		if (fp_cqe->bd_num != 1) {
 			u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len);
 			u8 num_frags;
@@ -1303,18 +1348,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 			     num_frags--) {
 				u16 cur_size = pkt_len > rxq->rx_buf_size ?
 						rxq->rx_buf_size : pkt_len;
+				if (unlikely(!cur_size)) {
+					DP_ERR(edev,
+					       "Still got %d BDs for mapping jumbo, but length became 0\n",
+					       num_frags);
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
+					goto next_cqe;
+				}
 
-				WARN_ONCE(!cur_size,
-					  "Still got %d BDs for mapping jumbo, but length became 0\n",
-					  num_frags);
-
-				if (unlikely(qede_alloc_rx_buffer(edev, rxq)))
+				if (unlikely(qede_alloc_rx_buffer(edev, rxq))) {
+					qede_recycle_rx_bd_ring(rxq, edev,
+								num_frags);
+					dev_kfree_skb_any(skb);
 					goto next_cqe;
+				}
 
-				rxq->sw_rx_cons++;
 				sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
 				sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
-				qed_chain_consume(&rxq->rx_bd_ring);
+				qede_rx_bd_ring_consume(rxq);
+
 				dma_unmap_page(&edev->pdev->dev,
 					       sw_rx_data->mapping,
 					       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -1330,7 +1384,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 				pkt_len -= cur_size;
 			}
 
-			if (pkt_len)
+			if (unlikely(pkt_len))
 				DP_ERR(edev,
 				       "Mapped all BDs of jumbo, but still have %d bytes\n",
 				       pkt_len);
@@ -1349,10 +1403,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
 		skb_record_rx_queue(skb, fp->rss_id);
 
 		qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-		qed_chain_consume(&rxq->rx_bd_ring);
-next_rx:
-		rxq->sw_rx_cons++;
next_rx_only:
 		rx_pkt++;
 
@@ -2257,7 +2307,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
 		struct sw_rx_data *replace_buf = &tpa_info->replace_buf;
 
-		if (replace_buf) {
+		if (replace_buf->data) {
 			dma_unmap_page(&edev->pdev->dev,
 				       dma_unmap_addr(replace_buf, mapping),
 				       PAGE_SIZE, DMA_FROM_DEVICE);
@@ -2377,7 +2427,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
 static int qede_alloc_mem_rxq(struct qede_dev *edev,
 			      struct qede_rx_queue *rxq)
 {
-	int i, rc, size, num_allocated;
+	int i, rc, size;
 
 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
 
@@ -2394,6 +2444,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
 	if (!rxq->sw_rx_ring) {
 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
+		rc = -ENOMEM;
 		goto err;
 	}
 
@@ -2421,26 +2472,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
 	/* Allocate buffers for the Rx ring */
 	for (i = 0; i < rxq->num_rx_buffers; i++) {
 		rc = qede_alloc_rx_buffer(edev, rxq);
-		if (rc)
-			break;
-	}
-	num_allocated = i;
-	if (!num_allocated) {
-		DP_ERR(edev, "Rx buffers allocation failed\n");
+		if (rc) {
+			DP_ERR(edev,
+			       "Rx buffers allocation failed at index %d\n", i);
 			goto err;
-	} else if (num_allocated < rxq->num_rx_buffers) {
-		DP_NOTICE(edev,
-			  "Allocated less buffers than desired (%d allocated)\n",
-			  num_allocated);
+		}
 	}
 
-	qede_alloc_sge_mem(edev, rxq);
-
-	return 0;
+	rc = qede_alloc_sge_mem(edev, rxq);
 
+	return rc;
err:
 	qede_free_mem_rxq(edev, rxq);
-	return rc;
+	return -ENOMEM;
 }
 
 static void qede_free_mem_txq(struct qede_dev *edev,
@@ -2523,10 +2564,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev,
 	}
 
 	return 0;
-
err:
 	qede_free_mem_fp(edev, fp);
-	return rc;
+	return -ENOMEM;
 }
 
 static void qede_free_mem_load(struct qede_dev *edev)
@@ -2549,22 +2588,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev)
 		struct qede_fastpath *fp = &edev->fp_array[rss_id];
 
 		rc = qede_alloc_mem_fp(edev, fp);
-		if (rc)
-			break;
-	}
-
-	if (rss_id != QEDE_RSS_CNT(edev)) {
-		/* Failed allocating memory for all the queues */
-		if (!rss_id) {
+		if (rc) {
 			DP_ERR(edev,
-			       "Failed to allocate memory for the leading queue\n");
-			rc = -ENOMEM;
-		} else {
-			DP_NOTICE(edev,
-				  "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n",
-				  QEDE_RSS_CNT(edev), rss_id);
+			       "Failed to allocate memory for fastpath - rss id = %d\n",
+			       rss_id);
+			qede_free_mem_load(edev);
+			return rc;
 		}
-		edev->num_rss = rss_id;
 	}
 
 	return 0;
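The allocation hunks above (qede_alloc_mem_rxq(), qede_alloc_mem_fp(), qede_alloc_mem_load()) all converge on the same idiom: fail fast on the first allocation error, unwind everything already allocated through a single err label, and propagate the error, rather than continuing with fewer buffers or queues than requested as the old code did. A minimal userspace sketch of that shape, with hypothetical names:

/* Sketch of the fail-fast unwinding idiom adopted above; free_fp() is
 * safe to call on a partially initialized fastpath, which is what
 * makes the single cleanup label work. All names are hypothetical.
 */
#include <stdlib.h>

struct fastpath {
	void *rx_mem;
	void *tx_mem;
};

static void free_fp(struct fastpath *fp)
{
	free(fp->rx_mem);	/* free(NULL) is a no-op, so partial */
	free(fp->tx_mem);	/* allocations unwind safely */
	fp->rx_mem = NULL;
	fp->tx_mem = NULL;
}

static int alloc_fp(struct fastpath *fp)
{
	fp->rx_mem = NULL;
	fp->tx_mem = NULL;

	fp->rx_mem = calloc(1, 4096);
	if (!fp->rx_mem)
		goto err;

	fp->tx_mem = calloc(1, 4096);
	if (!fp->tx_mem)
		goto err;

	return 0;

err:
	free_fp(fp);	/* single cleanup point, as in qede_alloc_mem_fp() */
	return -1;	/* the driver returns -ENOMEM / rc here */
}

qede_alloc_mem_load() applies the same shape one level up: on the first failing queue it calls qede_free_mem_load() to release every fastpath allocated so far and returns rc, where the old code instead shrank edev->num_rss and carried on with a partially allocated device.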