
Commit 6c621a22 authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Separate NVMET RQ buffer posting from IO resources SGL/iocbq/context



Currently, IO resources are mapped 1:1 with the RQ buffers posted.

Added logic to separate RQE buffers from IO op resources
(SGL/iocbq/context). During initialization, the driver determines how
many SGLs it will allocate for NVMET (based on what the firmware
reports) and associates an NVMET IOCBq and an NVMET context structure
with each one.
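
The association can be pictured as a minimal init-time loop, sketched
below. This is illustrative only, not the patch's literal code: the
struct and helper names are the ones this patch uses elsewhere, but
variable declarations, NULL checks, and error unwinding are omitted.

	/* Sketch: build one lpfc_nvmet_ctxbuf (rcv context + iocbq +
	 * sglq) per NVMET XRI and park it on lpfc_nvmet_ctx_list for
	 * the receive path to claim.
	 */
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf)
			break;
		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba,
							  ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		list_add_tail(&ctx_buf->list,
			      &phba->sli4_hba.lpfc_nvmet_ctx_list);
	}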

Now that hdr/data buffers are immediately reposted back to the RQ, 512
RQEs for each MRQ are sufficient. Also, since NVMET data buffers are now
128 bytes, lpfc_nvmet_mrq_post is no longer necessary, as we will always
post the maximum (512) buffers per NVMET MRQ.
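
The immediate repost runs through the existing lpfc_rq_buf_free() path
(see the lpfc_mem.c hunk below): the buffer's hdr/data DMA addresses go
straight back onto the RQ pair, condensed here for reference:

	/* Repost the hdr/data pair to the same RQs it came from */
	hrqe.address_lo = putPaddrLow(rqb_entry->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(rqb_entry->hbuf.phys);
	drqe.address_lo = putPaddrLow(rqb_entry->dbuf.phys);
	drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
	if (rc < 0)	/* RQ put failed; free the buffer instead */
		(rqbp->rqb_free_buffer)(phba, rqb_entry);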

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3c603be9

drivers/scsi/lpfc/lpfc.h  +7 −4
@@ -141,6 +141,13 @@ struct lpfc_dmabuf {
 	uint32_t   buffer_tag;	/* used for tagged queue ring */
 };
 
+struct lpfc_nvmet_ctxbuf {
+	struct list_head list;
+	struct lpfc_nvmet_rcv_ctx *context;
+	struct lpfc_iocbq *iocbq;
+	struct lpfc_sglq *sglq;
+};
+
 struct lpfc_dma_pool {
 	struct lpfc_dmabuf   *elements;
 	uint32_t    max_count;
@@ -163,9 +170,6 @@ struct rqb_dmabuf {
 	struct lpfc_dmabuf dbuf;
 	uint16_t total_size;
 	uint16_t bytes_recv;
-	void *context;
-	struct lpfc_iocbq *iocbq;
-	struct lpfc_sglq *sglq;
 	struct lpfc_queue *hrq;	  /* ptr to associated Header RQ */
 	struct lpfc_queue *drq;	  /* ptr to associated Data RQ */
 };
@@ -777,7 +781,6 @@ struct lpfc_hba {
 	uint32_t cfg_nvme_oas;
 	uint32_t cfg_nvme_io_channel;
 	uint32_t cfg_nvmet_mrq;
-	uint32_t cfg_nvmet_mrq_post;
 	uint32_t cfg_enable_nvmet;
 	uint32_t cfg_nvme_enable_fb;
 	uint32_t cfg_nvmet_fb_size;

drivers/scsi/lpfc/lpfc_attr.c  +0 −11
@@ -3315,14 +3315,6 @@ LPFC_ATTR_R(nvmet_mrq,
 	    1, 1, 16,
 	    "Specify number of RQ pairs for processing NVMET cmds");
 
-/*
- * lpfc_nvmet_mrq_post: Specify number buffers to post on every MRQ
- *
- */
-LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST,
-	    LPFC_MIN_MRQ_POST, LPFC_MAX_MRQ_POST,
-	    "Specify number of buffers to post on every MRQ");
-
 /*
  * lpfc_enable_fc4_type: Defines what FC4 types are supported.
  * Supported Values:  1 - register just FCP
@@ -5158,7 +5150,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_suppress_rsp,
 	&dev_attr_lpfc_nvme_io_channel,
 	&dev_attr_lpfc_nvmet_mrq,
-	&dev_attr_lpfc_nvmet_mrq_post,
 	&dev_attr_lpfc_nvme_enable_fb,
 	&dev_attr_lpfc_nvmet_fb_size,
 	&dev_attr_lpfc_enable_bg,
@@ -6198,7 +6189,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
 	lpfc_enable_fc4_type_init(phba, lpfc_enable_fc4_type);
 	lpfc_nvmet_mrq_init(phba, lpfc_nvmet_mrq);
-	lpfc_nvmet_mrq_post_init(phba, lpfc_nvmet_mrq_post);
 
 	/* Initialize first burst. Target vs Initiator are different. */
 	lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb);
@@ -6295,7 +6285,6 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba)
 		/* Not NVME Target mode.  Turn off Target parameters. */
 		phba->nvmet_support = 0;
 		phba->cfg_nvmet_mrq = 0;
-		phba->cfg_nvmet_mrq_post = 0;
 		phba->cfg_nvmet_fb_size = 0;
 	}
 

drivers/scsi/lpfc/lpfc_crtn.h  +4 −4
@@ -75,6 +75,8 @@ void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_release_rpi(struct lpfc_hba *, struct lpfc_vport *, uint16_t);
+int lpfc_init_iocb_list(struct lpfc_hba *phba, int cnt);
+void lpfc_free_iocb_list(struct lpfc_hba *phba);
 
 void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -246,16 +248,14 @@ struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
 void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
 struct rqb_dmabuf *lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba);
 void lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab);
-void lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
-			struct lpfc_dmabuf *mp);
+void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
+			    struct lpfc_nvmet_ctxbuf *ctxp);
 int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
 			       struct fc_frame_header *fc_hdr);
 void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
 			uint16_t);
 int lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
 		     struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe);
-int lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq,
-			struct lpfc_queue *dq, int count);
 int lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hq);
 void lpfc_unregister_fcf(struct lpfc_hba *);
 void lpfc_unregister_fcf_rescan(struct lpfc_hba *);

drivers/scsi/lpfc/lpfc_init.c  +10 −82
@@ -1099,7 +1099,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
-			lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
+			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
 		}
 	}
 
@@ -3381,7 +3381,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 {
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
 	uint16_t i, lxri, xri_cnt, els_xri_cnt;
-	uint16_t nvmet_xri_cnt, tot_cnt;
+	uint16_t nvmet_xri_cnt;
 	LIST_HEAD(nvmet_sgl_list);
 	int rc;
 
@@ -3389,20 +3389,9 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 	 * update on pci function's nvmet xri-sgl list
 	 */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
 
-	/* Ensure we at least meet the minimun for the system */
-	if (nvmet_xri_cnt < LPFC_NVMET_RQE_DEF_COUNT)
-		nvmet_xri_cnt = LPFC_NVMET_RQE_DEF_COUNT;
-
-	tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	if (nvmet_xri_cnt > tot_cnt) {
-		phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
-		nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
-		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-				"6301 NVMET post-sgl count changed to %d\n",
-				phba->cfg_nvmet_mrq_post);
-	}
+	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
+	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
 
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
@@ -5835,6 +5824,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+
 		/* Fast-path XRI aborted CQ Event work queue list */
 		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}
@@ -6279,7 +6270,7 @@ lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
  *
  * This routine is invoked to free the driver's IOCB list and memory.
  **/
-static void
+void
 lpfc_free_iocb_list(struct lpfc_hba *phba)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
@@ -6307,7 +6298,7 @@ lpfc_free_iocb_list(struct lpfc_hba *phba)
  *	0 - successful
  *	other values - error
  **/
-static int
+int
 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
 {
 	struct lpfc_iocbq *iocbq_entry = NULL;
@@ -8321,46 +8312,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
 }
 
-int
-lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
-		    struct lpfc_queue *drq, int count)
-{
-	int rc, i;
-	struct lpfc_rqe hrqe;
-	struct lpfc_rqe drqe;
-	struct lpfc_rqb *rqbp;
-	struct rqb_dmabuf *rqb_buffer;
-	LIST_HEAD(rqb_buf_list);
-
-	rqbp = hrq->rqbp;
-	for (i = 0; i < count; i++) {
-		rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
-		if (!rqb_buffer)
-			break;
-		rqb_buffer->hrq = hrq;
-		rqb_buffer->drq = drq;
-		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
-	}
-	while (!list_empty(&rqb_buf_list)) {
-		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
-				 hbuf.list);
-
-		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
-		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
-		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
-		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
-		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
-		if (rc < 0) {
-			(rqbp->rqb_free_buffer)(phba, rqb_buffer);
-		} else {
-			list_add_tail(&rqb_buffer->hbuf.list,
-				      &rqbp->rqb_buffer_list);
-			rqbp->buffer_count++;
-		}
-	}
-	return 1;
-}
-
 int
 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
 {
@@ -11103,7 +11054,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error, cnt, num;
+	int error;
 	uint32_t cfg_mode, intr_mode;
 
 	/* Allocate memory for HBA structure */
@@ -11137,27 +11088,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 		goto out_unset_pci_mem_s4;
 	}
 
-	cnt = phba->cfg_iocb_cnt * 1024;
-	if (phba->nvmet_support) {
-		/* Ensure we at least meet the minimun for the system */
-		num = (phba->cfg_nvmet_mrq_post * phba->cfg_nvmet_mrq);
-		if (num < LPFC_NVMET_RQE_DEF_COUNT)
-			num = LPFC_NVMET_RQE_DEF_COUNT;
-		cnt += num;
-	}
-
-	/* Initialize and populate the iocb list per host */
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-			"2821 initialize iocb list %d total %d\n",
-			phba->cfg_iocb_cnt, cnt);
-	error = lpfc_init_iocb_list(phba, cnt);
-
-	if (error) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1413 Failed to initialize iocb list.\n");
-		goto out_unset_driver_resource_s4;
-	}
-
 	INIT_LIST_HEAD(&phba->active_rrq_list);
 	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
 
@@ -11166,7 +11096,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"1414 Failed to set up driver resource.\n");
-		goto out_free_iocb_list;
+		goto out_unset_driver_resource_s4;
 	}
 
 	/* Get the default values for Model Name and Description */
@@ -11266,8 +11196,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	lpfc_destroy_shost(phba);
 out_unset_driver_resource:
 	lpfc_unset_driver_resource_phase2(phba);
-out_free_iocb_list:
-	lpfc_free_iocb_list(phba);
 out_unset_driver_resource_s4:
 	lpfc_sli4_driver_resource_unset(phba);
 out_unset_pci_mem_s4:

drivers/scsi/lpfc/lpfc_mem.c  +5 −68
@@ -629,8 +629,6 @@ struct rqb_dmabuf *
 lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 {
 	struct rqb_dmabuf *dma_buf;
-	struct lpfc_iocbq *nvmewqe;
-	union lpfc_wqe128 *wqe;
 
 	dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
 	if (!dma_buf)
@@ -651,60 +649,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 		return NULL;
 	}
 	dma_buf->total_size = LPFC_NVMET_DATA_BUF_SIZE;
-
-	dma_buf->context = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx),
-				   GFP_KERNEL);
-	if (!dma_buf->context) {
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		return NULL;
-	}
-
-	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	if (!dma_buf->iocbq) {
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"2621 Ran out of nvmet iocb/WQEs\n");
-		return NULL;
-	}
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
-	nvmewqe = dma_buf->iocbq;
-	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
-	/* Initialize WQE */
-	memset(wqe, 0, sizeof(union lpfc_wqe));
-	/* Word 7 */
-	bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
-	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
-	bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
-	/* Word 10 */
-	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
-	bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
-	bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
-
-	dma_buf->iocbq->context1 = NULL;
-	spin_lock(&phba->sli4_hba.sgl_list_lock);
-	dma_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, dma_buf->iocbq);
-	spin_unlock(&phba->sli4_hba.sgl_list_lock);
-	if (!dma_buf->sglq) {
-		lpfc_sli_release_iocbq(phba, dma_buf->iocbq);
-		kfree(dma_buf->context);
-		pci_pool_free(phba->lpfc_nvmet_drb_pool, dma_buf->dbuf.virt,
-			      dma_buf->dbuf.phys);
-		pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
-			      dma_buf->hbuf.phys);
-		kfree(dma_buf);
-		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
-				"6132 Ran out of nvmet XRIs\n");
-		return NULL;
-	}
 	return dma_buf;
 }
 
@@ -723,18 +667,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 void
 lpfc_sli4_nvmet_free(struct lpfc_hba *phba, struct rqb_dmabuf *dmab)
 {
-	unsigned long flags;
-
-	__lpfc_clear_active_sglq(phba, dmab->sglq->sli4_lxritag);
-	dmab->sglq->state = SGL_FREED;
-	dmab->sglq->ndlp = NULL;
-
-	spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
-	list_add_tail(&dmab->sglq->list, &phba->sli4_hba.lpfc_nvmet_sgl_list);
-	spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock, flags);
-
-	lpfc_sli_release_iocbq(phba, dmab->iocbq);
-	kfree(dmab->context);
 	pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
 	pci_pool_free(phba->lpfc_nvmet_drb_pool,
 		      dmab->dbuf.virt, dmab->dbuf.phys);
@@ -822,6 +754,11 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
 	rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
 	if (rc < 0) {
 		(rqbp->rqb_free_buffer)(phba, rqb_entry);
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"6409 Cannot post to RQ %d: %x %x\n",
+				rqb_entry->hrq->queue_id,
+				rqb_entry->hrq->host_index,
+				rqb_entry->hrq->hba_index);
 	} else {
 		list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
 		rqbp->buffer_count++;