Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9a069e19 authored by Giridhar Malavali's avatar Giridhar Malavali Committed by James Bottomley
Browse files

[SCSI] qla2xxx: Add BSG support for FC ELS/CT passthrough and vendor commands.



[jejb: fixed printk casting issues]
Signed-off-by: default avatarSarang Radke <sarang.radke@qlogic.com>
Signed-off-by: default avatarGiridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: default avatarJames Bottomley <James.Bottomley@suse.de>
parent 90a86fc0
Loading
Loading
Loading
Loading
+745 −1
Original line number Diff line number Diff line
@@ -11,7 +11,9 @@
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);

static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
@@ -1167,6 +1169,28 @@ qla2x00_total_isp_aborts_show(struct device *dev,
	    ha->qla_stats.total_isp_aborts);
}

static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int rval = QLA_SUCCESS;
	uint16_t status[2] = {0, 0};
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLA84XX(ha) && ha->cs84xx) {
		if (ha->cs84xx->op_fw_version == 0) {
			rval = qla84xx_verify_chip(vha, status);
	}

	if ((rval == QLA_SUCCESS) && (status[0] == 0))
		return snprintf(buf, PAGE_SIZE, "%u\n",
			(uint32_t)ha->cs84xx->op_fw_version);
	}

	return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
@@ -1281,6 +1305,8 @@ static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
		   qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
		   NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
		   NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
		   NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
@@ -1310,6 +1336,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_84xx_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
@@ -1795,6 +1822,597 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
	return 0;
}

/* BSG support for ELS/CT pass through */
inline srb_t *
qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct srb_bsg_ctx *ctx;

	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
	if (!sp)
		goto done;
	ctx = kzalloc(size, GFP_KERNEL);
	if (!ctx) {
		mempool_free(sp, ha->srb_mempool);
		goto done;
	}

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->ctx = ctx;
done:
	return sp;
}

static int
qla2x00_process_els(struct fc_bsg_job *bsg_job)
{
	struct fc_rport *rport;
	fc_port_t *fcport;
	struct Scsi_Host *host;
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	srb_t *sp;
	const char *type;
	int req_sg_cnt, rsp_sg_cnt;
	int rval =  (DRIVER_ERROR << 16);
	uint16_t nextlid = 0;
	struct srb_bsg *els;

	/*  Multiple SG's are not supported for ELS requests */
        if (bsg_job->request_payload.sg_cnt > 1 ||
		bsg_job->reply_payload.sg_cnt > 1) {
		DEBUG2(printk(KERN_INFO
		    "multiple SG's are not supported for ELS requests"
		    " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt,
		    bsg_job->reply_payload.sg_cnt));
		rval = -EPERM;
		goto done;
        }

	/* ELS request for rport */
	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
		rport = bsg_job->rport;
		fcport = *(fc_port_t **) rport->dd_data;
		host = rport_to_shost(rport);
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_RPT_ELS";

		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): loop-id=%x portid=%02x%02x%02x.\n",
		    fcport->vha->host_no, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa));

		/* make sure the rport is logged in,
		 * if not perform fabric login
		 */
		if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
			DEBUG2(qla_printk(KERN_WARNING, ha,
			    "failed to login port %06X for ELS passthru\n",
			    fcport->d_id.b24));
			rval = -EIO;
			goto done;
		}
	} else {
		host = bsg_job->shost;
		vha = shost_priv(host);
		ha = vha->hw;
		type = "FC_BSG_HST_ELS_NOLOGIN";

		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): loop-id=%x portid=%02x%02x%02x.\n",
		    vha->host_no, vha->loop_id,
		    vha->d_id.b.domain, vha->d_id.b.area, vha->d_id.b.al_pa));

		/* Allocate a dummy fcport structure, since functions
		 * preparing the IOCB and mailbox command retrieves port
		 * specific information from fcport structure. For Host based
		 * ELS commands there will be no fcport structure allocated
		 */
		fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
		if (!fcport) {
			rval = -ENOMEM;
			goto done;
		}

		/* Initialize all required  fields of fcport */
		fcport->vha = vha;
		fcport->vp_idx = vha->vp_idx;
		fcport->d_id.b.al_pa =
		    bsg_job->request->rqst_data.h_els.port_id[0];
		fcport->d_id.b.area =
		    bsg_job->request->rqst_data.h_els.port_id[1];
		fcport->d_id.b.domain =
		    bsg_job->request->rqst_data.h_els.port_id[2];
		fcport->loop_id =
		    (fcport->d_id.b.al_pa == 0xFD) ?
		    NPH_FABRIC_CONTROLLER : NPH_F_PORT;
	}

	DEBUG2(printk(KERN_INFO
	    "scsi(%ld): vendor-id = %llu\n",
	    vha->host_no, host->hostt->vendor_id));

        req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}
        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
		rval = -ENOMEM;
                goto done_free_fcport;
	}

	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
	    (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
	{
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts \
		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
                goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
	if (!sp) {
		rval = -ENOMEM;
                goto done_unmap_sg;
	}

	els = sp->ctx;
	els->ctx.type =
	    (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
	    SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
	els->bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    bsg_job->request->rqst_data.h_els.command_code,
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_unmap_sg;
	}
	return rval;

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	goto done_free_fcport;

done_free_fcport:
	if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
		kfree(fcport);
done:
	return rval;
}

static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
	srb_t *sp;
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DRIVER_ERROR << 16);
	int req_sg_cnt, rsp_sg_cnt;
	uint16_t loop_id;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_CT";
	struct srb_bsg *ct;

	/* pass through is supported only for ISP 4Gb or higher */
        if (!IS_FWI2_CAPABLE(ha)) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "scsi(%ld):Firmware is not capable to support FC "
		    "CT pass thru\n", vha->host_no));
		rval = -EPERM;
                goto done;
	}

        req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!req_sg_cnt) {
		rval = -ENOMEM;
		goto done;
	}

        rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
            bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!rsp_sg_cnt) {
		rval = -ENOMEM;
                goto done;
	}

	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
	{
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "dma mapping resulted in different sg counts \
		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
		rval = -EAGAIN;
                goto done_unmap_sg;
	}

	loop_id =
	    (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
	    >> 24;
	switch (loop_id) {
		case 0xFC:
			loop_id = cpu_to_le16(NPH_SNS);
			break;
		case 0xFA:
			loop_id = vha->mgmt_svr_loop_id;
			break;
		default:
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unknown loop id: %x\n", loop_id));
			rval = -EINVAL;
			goto done_unmap_sg;
	}

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport)
	{
		rval = -ENOMEM;
		goto  done_unmap_sg;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	fcport->vp_idx = vha->vp_idx;
	fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
	fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
	fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
	fcport->loop_id = loop_id;

	/* Alloc SRB structure */
	sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
	if (!sp) {
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	ct = sp->ctx;
	ct->ctx.type = SRB_CT_CMD;
	ct->bsg_job = bsg_job;

	DEBUG2(qla_printk(KERN_INFO, ha,
	    "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
	    "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
	    (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
	    fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa));

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	kfree(fcport);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done:
	return rval;
}

static int
qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint8_t command_sent;
	uint32_t vendor_cmd;
	char *type;
	struct msg_echo_lb elreq;
	uint16_t response[MAILBOX_REGISTER_COUNT];
	uint8_t* fw_sts_ptr;
	uint8_t *req_data;
	dma_addr_t req_data_dma;
	uint32_t req_data_len;
	uint8_t *rsp_data;
	dma_addr_t rsp_data_dma;
	uint32_t rsp_data_len;

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval = -EBUSY;
		goto done;
	}

        elreq.req_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
        if (!elreq.req_sg_cnt) {
		rval = -ENOMEM;
		goto done;
	}
        elreq.rsp_sg_cnt =
	    dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        if (!elreq.rsp_sg_cnt) {
		rval = -ENOMEM;
                goto done;
	}

	if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
	    (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
	{
		DEBUG2(printk(KERN_INFO
		    "dma mapping resulted in different sg counts \
		    [request_sg_cnt: %x dma_request_sg_cnt: %x\
		    reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
		rval = -EAGAIN;
                goto done_unmap_sg;
	}
	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
	req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
	    &req_data_dma, GFP_KERNEL);

	rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
	    &rsp_data_dma, GFP_KERNEL);

	/* Copy the request buffer in req_data now */
	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, req_data,
	    req_data_len);

	elreq.send_dma = req_data_dma;
	elreq.rcv_dma = rsp_data_dma;
	elreq.transfer_size = req_data_len;

	/* Vendor cmd : loopback or ECHO diagnostic
	 * Options:
	 * 	Loopback : Either internal or external loopback
	 * 	ECHO: ECHO ELS or Vendor specific FC4  link data
	 */
	vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
	elreq.options =
	    *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
	    + 1);

	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
	case QL_VND_LOOPBACK:
		if (ha->current_topology != ISP_CFG_F) {
			type = "FC_BSG_HST_VENDOR_LOOPBACK";

			if ((IS_QLA81XX(ha)) &&
				((elreq.options == 0) || (elreq.options == 2))) {
				DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld)"
				"loopback option:0x%x not supported\n", vha->host_no, elreq.options));
				rval = -EINVAL;
				goto done_unmap_sg;
			}

			DEBUG2(qla_printk(KERN_INFO, ha,
				"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
				vha->host_no, type, vendor_cmd, elreq.options));
			DEBUG2(qla_printk(KERN_INFO, ha,
				"scsi(%ld) tx_addr: 0x%llx rx_addr: 0x%llx tx_sg_cnt: %x rx_sg_cnt: %x\n",
				vha->host_no, (unsigned long long)elreq.send_dma, (unsigned long long)elreq.rcv_dma, elreq.req_sg_cnt, elreq.rsp_sg_cnt));
			command_sent = INT_DEF_LB_LOOPBACK_CMD;
			rval = qla2x00_loopback_test(vha, &elreq, response);
			if (IS_QLA81XX(ha)) {
				if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
					DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
						"ISP\n", __func__, vha->host_no));
					set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
					qla2xxx_wake_dpc(vha);
				 }
			}
		} else {
			type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
			DEBUG2(qla_printk(KERN_INFO, ha,
				"scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
				vha->host_no, type, vendor_cmd, elreq.options));
			DEBUG2(qla_printk(KERN_INFO, ha,
				"scsi(%ld) tx_addr: 0x%llx rx_addr: 0x%llx tx_sg_cnt: %x rx_sg_cnt: %x\n",
				vha->host_no, (unsigned long long)elreq.send_dma, (unsigned long long)elreq.rcv_dma, elreq.req_sg_cnt, elreq.rsp_sg_cnt));
			command_sent = INT_DEF_LB_ECHO_CMD;
			rval = qla2x00_echo_test(vha, &elreq, response);
		}
		break;
	case QLA84_RESET:
		if (!IS_QLA84XX(vha->hw)) {
			rval = -EINVAL;
			DEBUG16(printk(
				"%s(%ld): 8xxx exiting.\n",
				__func__, vha->host_no));
			return rval;
		}
		rval = qla84xx_reset(vha, &elreq, bsg_job);
		break;
	case QLA84_MGMT_CMD:
		if (!IS_QLA84XX(vha->hw)) {
			rval = -EINVAL;
			DEBUG16(printk(
				"%s(%ld): 8xxx exiting.\n",
				__func__, vha->host_no));
			return rval;
		}
		rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
		break;
	default:
		rval = -ENOSYS;
	}

	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
			"scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
		rval = 0;
		bsg_job->reply->result = (DID_ERROR << 16);
		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
		memcpy( fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
                *fw_sts_ptr = command_sent;
	} else {
		DEBUG2(qla_printk(KERN_WARNING, ha,
			"scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
		rval = bsg_job->reply->result = 0;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
		memcpy(fw_sts_ptr, response, sizeof(response));
		fw_sts_ptr += sizeof(response);
		*fw_sts_ptr = command_sent;
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
		bsg_job->reply_payload.sg_cnt, rsp_data,
		rsp_data_len);
	}
	bsg_job->job_done(bsg_job);

done_unmap_sg:

	if(req_data)
		dma_free_coherent(&ha->pdev->dev, req_data_len,
			req_data, req_data_dma);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

done:
        return rval;
}

static int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
        int ret = -EINVAL;

        switch (bsg_job->request->msgcode) {
		case FC_BSG_RPT_ELS:
		case FC_BSG_HST_ELS_NOLOGIN:
			ret = qla2x00_process_els(bsg_job);
			break;
		case FC_BSG_HST_CT:
			ret = qla2x00_process_ct(bsg_job);
			break;
		case FC_BSG_HST_VENDOR:
			ret = qla2x00_process_vendor_specific(bsg_job);
			break;
		case FC_BSG_HST_ADD_RPORT:
		case FC_BSG_HST_DEL_RPORT:
		case FC_BSG_RPT_CT:
		default:
			DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
			break;
        }
	return ret;
}

static int
qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
{
        scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        int i;
        unsigned long flags;
        uint16_t que_id;
        struct req_que *req;
        struct rsp_que *rsp;
	int found = 0;
	struct srb_bsg *sp_bsg;

	/* find the bsg job from the active list of commands */
        spin_lock_irqsave(&ha->hardware_lock, flags);
	req = ha->req_q_map[0];
        que_id = req->id;
        if (req->rsp)
                rsp = req->rsp;
        else
                rsp = ha->rsp_q_map[que_id];

	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++ ) {
                sp = req->outstanding_cmds[i];

                if (sp == NULL)
                        continue;

		sp_bsg = (struct srb_bsg*)sp->ctx;

		if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
		    (sp_bsg->ctx.type == SRB_ELS_CMD_RPT)
		    || ( sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
		    (sp_bsg->bsg_job == bsg_job)) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "scsi(%ld) req_q: %p rsp_q: %p que_id: %x sp: %p\n",
			    vha->host_no, req, rsp, que_id, sp));
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if (!found) {
		DEBUG2(qla_printk(KERN_INFO, ha,
			"scsi(%ld) SRB not found to abort\n", vha->host_no));
		bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
		return 0;
	}

	if (ha->isp_ops->abort_command(sp)) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		"scsi(%ld): mbx abort_command failed\n", vha->host_no));
		bsg_job->req->errors = bsg_job->reply->result = -EIO;
	} else {
		DEBUG2(qla_printk(KERN_INFO, ha,
		"scsi(%ld): mbx abort_command success\n", vha->host_no));
		bsg_job->req->errors = bsg_job->reply->result = 0;
	}

	if (bsg_job->request->msgcode == FC_BSG_HST_CT)
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	return 0;
}

struct fc_function_template qla2xxx_transport_functions = {

	.show_host_node_name = 1,
@@ -1838,6 +2456,8 @@ struct fc_function_template qla2xxx_transport_functions = {
	.vport_create = qla24xx_vport_create,
	.vport_disable = qla24xx_vport_disable,
	.vport_delete = qla24xx_vport_delete,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

struct fc_function_template qla2xxx_transport_vport_functions = {
@@ -1878,6 +2498,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
	.terminate_rport_io = qla2x00_terminate_rport_io,
	.get_fc_host_stats = qla2x00_get_fc_host_stats,
	.bsg_request = qla24xx_bsg_request,
	.bsg_timeout = qla24xx_bsg_timeout,
};

void
@@ -1906,3 +2528,125 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
		speed = FC_PORTSPEED_1GBIT;
	fc_host_supported_speeds(vha->host) = speed;
}
static int
qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
{
	int             ret = 0;
	int             cmd;
	uint16_t        cmd_status;

	DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));

	cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
			== A84_RESET_FLAG_ENABLE_DIAG_FW ?
				A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
	ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
	&cmd_status);
	return ret;
}

static int
qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
{
	struct access_chip_84xx *mn;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int ret = 0;
	int rsp_hdr_len, len = 0;
	struct qla84_msg_mgmt *ql84_mgmt;

	ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
	ql84_mgmt->cmd =
		*((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
	ql84_mgmt->mgmtp.u.mem.start_addr =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
	ql84_mgmt->len =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
	ql84_mgmt->mgmtp.u.config.id =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
	ql84_mgmt->mgmtp.u.config.param0 =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
	ql84_mgmt->mgmtp.u.config.param1 =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
	ql84_mgmt->mgmtp.u.info.type =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
	ql84_mgmt->mgmtp.u.info.context =
		*((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));

	rsp_hdr_len = bsg_job->request_payload.payload_len;

	mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (mn == NULL) {
		DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
		"failed%lu\n", __func__, ha->host_no));
		return -ENOMEM;
	}

	memset(mn, 0, sizeof (struct access_chip_84xx));

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	switch (ql84_mgmt->cmd) {
	case QLA84_MGMT_READ_MEM:
		mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
		break;
	case QLA84_MGMT_WRITE_MEM:
		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
		break;
	case QLA84_MGMT_CHNG_CONFIG:
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
		mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
		break;
	case QLA84_MGMT_GET_INFO:
		mn->options = cpu_to_le16(ACO_REQUEST_INFO);
		mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
		mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
		break;
	default:
		ret = -EIO;
		goto exit_mgmt0;
	}

	if ((len == ql84_mgmt->len) &&
		ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
		mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
				&mgmt_dma, GFP_KERNEL);
		if (mgmt_b == NULL) {
			DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
			"failed%lu\n", __func__, ha->host_no));
			ret = -ENOMEM;
			goto exit_mgmt0;
		}
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(len);

		if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
			memcpy(mgmt_b, ql84_mgmt->payload, len);
		}
	}

	ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
	if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
		|| (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
			if (ret != QLA_SUCCESS)
				DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
					__func__, ha->host_no));
	} else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
	}

	if (mgmt_b)
		dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);

exit_mgmt0:
	dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
	return ret;
}
+155 −0

File changed.

Preview size limit exceeded, changes collapsed.

+33 −0
Original line number Diff line number Diff line
@@ -627,6 +627,39 @@ struct els_entry_24xx {
	uint32_t rx_len;		/* Data segment 1 length. */
};

struct els_sts_entry_24xx {
	uint8_t entry_type;		/* Entry type. */
	uint8_t entry_count;		/* Entry count. */
	uint8_t sys_define;		/* System Defined. */
	uint8_t entry_status;		/* Entry Status. */

	uint32_t handle;		/* System handle. */

	uint16_t comp_status;

	uint16_t nport_handle;		/* N_PORT handle. */

	uint16_t reserved_1;

	uint8_t vp_index;
	uint8_t sof_type;

	uint32_t rx_xchg_address;	/* Receive exchange address. */
	uint16_t reserved_2;

	uint8_t opcode;
	uint8_t reserved_3;

	uint8_t port_id[3];
	uint8_t reserved_4;

	uint16_t reserved_5;

	uint16_t control_flags;		/* Control flags. */
	uint32_t total_byte_count;
	uint32_t error_subcode_1;
	uint32_t error_subcode_2;
};
/*
 * ISP queue - Mailbox Command entry structure definition.
 */
+5 −0

File changed.

Preview size limit exceeded, changes collapsed.

+12 −2

File changed.

Preview size limit exceeded, changes collapsed.

Loading