Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 318083ad authored by James Smart's avatar James Smart Committed by Martin K. Petersen
Browse files

scsi: lpfc: add NVME exchange aborts



The previous code did little more than log a message.

This patch adds abort path support, modeled after the SCSI code paths.
Currently addresses only the initiator path. Target path under
development, but stubbed out.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 3ebd9b47
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -692,6 +692,7 @@ struct lpfc_hba {
					 * capability
					 */
#define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
#define NVME_XRI_ABORT_EVENT	0x100000

	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
	struct lpfc_dmabuf slim2p;
+2 −0
Original line number Diff line number Diff line
@@ -641,6 +641,8 @@ lpfc_work_done(struct lpfc_hba *phba)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
			lpfc_sli4_fcp_xri_abort_event_proc(phba);
		if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
			lpfc_sli4_nvme_xri_abort_event_proc(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
+7 −0
Original line number Diff line number Diff line
@@ -5723,6 +5723,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		/* Fast-path XRI aborted CQ Event work queue list */
		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
	}

	/* This abort list used by worker thread */
@@ -8960,6 +8962,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Pending NVME XRI abort events */
		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
				 &cqelist);
	}
	/* Pending asynnc events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
+59 −5
Original line number Diff line number Diff line
@@ -1277,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
	pnvme_fcreq->private = (void *)lpfc_ncmd;
	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
	lpfc_ncmd->nrport = rport;
	lpfc_ncmd->ndlp = ndlp;
	lpfc_ncmd->start_time = jiffies;

	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1812,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* failure, put on abort nvme list */
					lpfc_ncmd->exch_busy = 1;
					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
				} else {
					/* success, put on NVME buffer list */
					lpfc_ncmd->exch_busy = 0;
					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
@@ -1845,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
					 struct lpfc_nvme_buf, list);
			if (status) {
				/* failure, put on abort nvme list */
				lpfc_ncmd->exch_busy = 1;
				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
			} else {
				/* success, put on NVME buffer list */
				lpfc_ncmd->exch_busy = 0;
				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
@@ -2090,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
	unsigned long iflag = 0;

	lpfc_ncmd->nonsg_phys = 0;
	if (lpfc_ncmd->exch_busy) {
	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
					iflag);
		lpfc_ncmd->nvmeCmd = NULL;
@@ -2453,3 +2454,56 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
			 "6168: State error: lport %p, rport%p FCID x%06x\n",
			 vport->localport, ndlp->rport, ndlp->nlp_DID);
}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvme xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  It searches the driver's aborted-NVME-buffer list for
 * the buffer whose XRI matches the aborted exchange, unlinks it, clears its
 * exchange-busy state, re-activates the RRQ for the exchange (when a node
 * is associated with the command), and releases the buffer.  If no buffer
 * matches the XRI, the event is silently ignored.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri)
{
	/* XRI of the aborted exchange and the remote exchange id, both
	 * extracted from the abort WCQE.
	 */
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;

	/* Nothing to do unless NVME is enabled on this adapter. */
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	/* Lock order: hbalock first, then the abort-buffer list lock. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
				 &phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 list) {
		if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
			/* Found the aborted exchange: take the buffer off
			 * the abort list and clear its exchange-busy state
			 * before dropping the list lock.
			 */
			list_del(&lpfc_ncmd->list);
			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
			lpfc_ncmd->status = IOSTAT_SUCCESS;
			spin_unlock(
				&phba->sli4_hba.abts_nvme_buf_list_lock);

			/* Sample the RRQ list while still under hbalock;
			 * used below to decide whether the worker thread
			 * needs a wake-up.
			 */
			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			ndlp = lpfc_ncmd->ndlp;
			if (ndlp) {
				/* Re-activate the RRQ for this exchange and
				 * run the common ABTS error handler for the
				 * remote node.
				 */
				lpfc_set_rrq_active(
					phba, ndlp,
					lpfc_ncmd->cur_iocbq.sli4_lxritag,
					rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}
			/* LPFC_SBUF_XBUSY was cleared above, so the release
			 * path will not put the buffer back on the abort
			 * list.  NOTE(review): lpfc_ncmd is used here after
			 * both locks were dropped — presumably safe because
			 * it was unlinked from the abort list while locked;
			 * confirm no other path can free it first.
			 */
			lpfc_release_nvme_buf(phba, lpfc_ncmd);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
+1 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
	struct list_head list;
	struct nvmefc_fcp_req *nvmeCmd;
	struct lpfc_nvme_rport *nrport;
	struct lpfc_nodelist *ndlp;

	uint32_t timeout;

Loading