Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8b0dff14 authored by James Smart's avatar James Smart Committed by James Bottomley
Browse files

lpfc: Add support for using block multi-queue



With blk-mq support in the mid-layer, lpfc can do IO steering based
on the information in the request tag.  This patch allows lpfc to use
blk-mq if enabled. If not enabled, we fall back to the Emulex-internal
affinity mappings.

This feature can be turned on via CONFIG_SCSI_MQ_DEFAULT or by passing
scsi_mod.use_blk_mq=Y as a parameter to the kernel.

Signed-off-by: Dick Kennedy <dick.kennedy@avagotech.com>
Signed-off-by: James Smart <james.smart@avagotech.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
parent 953ceeda
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -8980,6 +8981,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
		phba->cfg_fcp_io_channel = vectors;
	}

	if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
		lpfc_sli4_set_affinity(phba, vectors);
	return rc;

+43 −0
Original line number Diff line number Diff line
@@ -3845,6 +3845,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the lpfc scsi command being issued.
 *
 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
 * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 * If scsi-mq is enabled, get the default block layer mapping of software queues
 * to hardware queues. This information is saved in request tag.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
				  struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct lpfc_vector_map_info *cpup;
	uint32_t tag;
	int idx;

	/* scsi-mq: the block layer encoded the hw queue in the request tag */
	if (shost_use_blk_mq(cmnd->device->host)) {
		tag = blk_mq_unique_tag(cmnd->request);
		return blk_mq_unique_tag_to_hwq(tag);
	}

	/* CPU-affinity scheduling: use the issuing CPU's mapped channel */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU &&
	    phba->cfg_fcp_io_channel > 1) {
		idx = smp_processor_id();
		if (idx < phba->sli4_hba.num_present_cpu) {
			cpup = &phba->sli4_hba.cpu_map[idx];
			return cpup->channel_id;
		}
	}

	/* Otherwise round-robin across the configured FCP channels */
	idx = atomic_add_return(1, &phba->fcp_qidx);
	return idx % phba->cfg_fcp_io_channel;
}


/**
 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
 * @phba: The Hba for which this call is being executed.
+3 −0
Original line number Diff line number Diff line
@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
#define FIND_FIRST_OAS_LUN		 0
#define NO_MORE_OAS_LUN			-1
#define NOT_OAS_ENABLED_LUN		NO_MORE_OAS_LUN

int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
				  struct lpfc_scsi_buf *lpfc_cmd);
+23 −51
Original line number Diff line number Diff line
@@ -8137,36 +8137,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
	return sglq->sli4_xritag;
}
/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 *
 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
 * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 *
 * NOTE(review): this is the pre-blk-mq variant this commit removes from
 * lpfc_sli.c; an extended version that also takes the lpfc_scsi_buf (and
 * consults the blk-mq request tag) replaces it in lpfc_scsi.c.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
static inline int
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
	struct lpfc_vector_map_info *cpup;
	int chann, cpu;
	/* CPU-affinity scheduling: map the issuing CPU to its channel */
	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
	    && phba->cfg_fcp_io_channel > 1) {
		cpu = smp_processor_id();
		if (cpu < phba->sli4_hba.num_present_cpu) {
			cpup = phba->sli4_hba.cpu_map;
			cpup += cpu;
			return cpup->channel_id;
		}
	}
	/* Fallback: round-robin across all configured FCP channels */
	chann = atomic_add_return(1, &phba->fcp_qidx);
	chann = (chann % phba->cfg_fcp_io_channel);
	return chann;
}
/**
 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
@@ -8807,7 +8777,9 @@ int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4) {
	if (phba->sli_rev < LPFC_SLI_REV4)
		return ring_number;
	if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
				(!(piocb->iocb_flag & LPFC_IO_FOF))) {
@@ -8819,7 +8791,8 @@ lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
				piocb->fcp_wqidx =
					    lpfc_sli4_scmd_to_wqidx_distr(phba);
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							      piocb->context1);
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
				piocb->fcp_wqidx;
		} else {
@@ -8829,7 +8802,6 @@ lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
			ring_number =  LPFC_FCP_OAS_RING;
		}
	}
	}
	return ring_number;
}