
Commit 7a56dc88 authored by Sagi Grimberg, committed by Nicholas Bellinger

iser-target: avoid posting a recv buffer twice



We pre-allocate our send queues and can overflow them when an operation
requires multiple work requests, which tends to happen for large RDMA
transfers over devices with a limited number of allowed sg elements.
When we hit a queue-full condition we may retry later, so track our
receive buffers so that we don't repost them on the retry path.
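
In practice the fix is a single ownership flag per receive descriptor: the receive completion marks the buffer as owned by the driver, posting it back to the hardware clears the flag, and the post path silently skips buffers it has already reposted. Below is a minimal, self-contained sketch of that pattern; the rx_desc, recv_completion and repost_recv_buffer names are hypothetical stand-ins, not the real isert code.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified receive descriptor: only the ownership flag matters here. */
struct rx_desc {
	bool in_use;	/* true once a receive completion handed the buffer to us */
};

/* Called from the receive completion path: the buffer now belongs to the driver. */
static void recv_completion(struct rx_desc *desc)
{
	desc->in_use = true;
}

/*
 * Called whenever we want to hand the buffer back to the HCA. If the
 * descriptor was already reposted (in_use == false), silently succeed
 * instead of posting the same buffer twice.
 */
static int repost_recv_buffer(struct rx_desc *desc)
{
	if (!desc->in_use)
		return 0;	/* already reposted, nothing to do */

	desc->in_use = false;
	/* ... real code would build an ib_recv_wr and post it to the QP ... */
	return 0;
}

int main(void)
{
	struct rx_desc desc = { .in_use = false };

	recv_completion(&desc);		/* completion: buffer is ours */
	repost_recv_buffer(&desc);	/* first repost: actually posts */
	repost_recv_buffer(&desc);	/* retry after queue-full: no-op */
	printf("in_use=%d\n", desc.in_use);
	return 0;
}

The second repost_recv_buffer() call models the queue-full retry described above: because the descriptor was already handed back, it returns without posting the same buffer a second time.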

Reported-by: Potnuri Bharat Teja <bharat@chelsio.com>
Tested-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent 555a65f6
drivers/infiniband/ulp/isert/ib_isert.c  +12 −0
@@ -817,6 +817,7 @@ isert_post_recvm(struct isert_conn *isert_conn, u32 count)
 		rx_wr->sg_list = &rx_desc->rx_sg;
 		rx_wr->num_sge = 1;
 		rx_wr->next = rx_wr + 1;
+		rx_desc->in_use = false;
 	}
 	rx_wr--;
 	rx_wr->next = NULL; /* mark end of work requests list */
@@ -835,6 +836,15 @@ isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
 	struct ib_recv_wr *rx_wr_failed, rx_wr;
 	int ret;
 
+	if (!rx_desc->in_use) {
+		/*
+		 * if the descriptor is not in-use we already reposted it
+		 * for recv, so just silently return
+		 */
+		return 0;
+	}
+
+	rx_desc->in_use = false;
 	rx_wr.wr_cqe = &rx_desc->rx_cqe;
 	rx_wr.sg_list = &rx_desc->rx_sg;
 	rx_wr.num_sge = 1;
@@ -1397,6 +1407,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
+	rx_desc->in_use = true;
+
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
 			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 
drivers/infiniband/ulp/isert/ib_isert.h  +2 −1
@@ -60,7 +60,7 @@
 
 #define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
 		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		 sizeof(struct ib_cqe)))
+		 sizeof(struct ib_cqe) + sizeof(bool)))
 
 #define ISCSI_ISER_SG_TABLESIZE		256
 
@@ -85,6 +85,7 @@ struct iser_rx_desc {
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
+	bool		in_use;
 	char		pad[ISER_RX_PAD_SIZE];
 } __packed;
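
The header change is the subtle half of the patch: ISER_RX_PAD_SIZE is computed so that struct iser_rx_desc keeps a fixed overall footprint, so any member added to the descriptor must also be subtracted from the pad, which is why sizeof(bool) joins the expression. The following standalone sketch shows the same sizing trick with placeholder types and illustrative numbers (fake_sge, fake_cqe, PAYLOAD_SIZE and TARGET_SIZE are assumptions for the example, not values taken from the kernel headers).

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins; the real sizes come from the RDMA core headers. */
struct fake_sge { uint64_t addr; uint32_t length; uint32_t lkey; };
struct fake_cqe { void (*done)(void); };

#define PAYLOAD_SIZE	8192		/* placeholder for the receive payload area */
#define TARGET_SIZE	(8192 + 4096)	/* the fixed footprint the pad preserves */

/* Every member of the descriptor must also be subtracted here... */
#define RX_PAD_SIZE	(TARGET_SIZE - \
		(PAYLOAD_SIZE + sizeof(uint64_t) + sizeof(struct fake_sge) + \
		 sizeof(struct fake_cqe) + sizeof(bool)))

struct rx_desc {
	char		payload[PAYLOAD_SIZE];
	uint64_t	dma_addr;
	struct fake_sge	rx_sg;
	struct fake_cqe	rx_cqe;
	bool		in_use;		/* ...including the newly added flag... */
	char		pad[RX_PAD_SIZE];
} __attribute__((packed));

/* ...so the overall descriptor size never changes when a member is added. */
static_assert(sizeof(struct rx_desc) == TARGET_SIZE, "rx_desc footprint changed");

Had the sizeof(bool) term been left out of the pad calculation, the descriptor would silently grow by one byte, which is exactly the kind of drift the one-line header change prevents.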