Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1fb1f28 authored by Divy Le Ray, committed by Jeff Garzik
Browse files

cxgb3 - Fix dma mapping error path



Take potential DMA mapping errors into account.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 03194379
Loading
Loading
Loading
Loading
+41 −12
Original line number Diff line number Diff line
@@ -376,13 +376,16 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline void add_one_rx_buf(void *va, unsigned int len,
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(mapping)))
		return -ENOMEM;

	pci_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
@@ -390,6 +393,7 @@ static inline void add_one_rx_buf(void *va, unsigned int len,
	wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
@@ -424,13 +428,16 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.
 */
static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	void *buf_start;
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
nomem:				q->alloc_failed++;
@@ -447,8 +454,16 @@ nomem: q->alloc_failed++;
			buf_start = skb->data;
		}

		add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
		err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
				     adap->pdev);
		if (unlikely(err)) {
			if (!q->use_pages) {
				kfree_skb(sd->skb);
				sd->skb = NULL;
			}
			break;
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
@@ -458,9 +473,13 @@ nomem: q->alloc_failed++;
			d = q->desc;
		}
		q->credits++;
		count++;
	}
	wmb();
	if (likely(count))
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
@@ -2618,7 +2637,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
		      int irq_vec_idx, const struct qset_params *p,
		      int ntxq, struct net_device *dev)
{
	int i, ret = -ENOMEM;
	int i, avail, ret = -ENOMEM;
	struct sge_qset *q = &adapter->sge.qs[id];

	init_qset_cntxt(q, id);
@@ -2741,9 +2760,19 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
	q->adap = adapter;
	q->netdev = dev;
	t3_update_qset_coalesce(q, p);
	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
	if (!avail) {
		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
		goto err;
	}
	if (avail < q->fl[0].size)
		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
			avail);

	refill_fl(adapter, &q->fl[0], q->fl[0].size, GFP_KERNEL);
	refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, GFP_KERNEL);
	if (avail < q->fl[1].size)
		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
			avail);
	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);

	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |