Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b6faaaf7 authored by Quinn Tran, committed by Martin K. Petersen
Browse files

scsi: qla2xxx: Serialize mailbox request



For driver MBX submission, use mbox_busy to serialize requests.  For userspace
MBX submission, use the optrom mutex to serialize requests.

Signed-off-by: Quinn Tran <quinn.tran@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 8852f5b1
Loading
Loading
Loading
Loading
+88 −18
Original line number Diff line number Diff line
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
	if (!capable(CAP_SYS_ADMIN))
		return 0;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
		    ha->nvram_size);
	mutex_unlock(&ha->optrom_mutex);

	return memory_read_from_buffer(buf, count, &off, ha->nvram,
					ha->nvram_size);
}
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
	     count);
	mutex_unlock(&ha->optrom_mutex);

	ql_dbg(ql_dbg_user, vha, 0x7060,
	    "Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
		size = ha->optrom_size - start;

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}
	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
		    qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
			faddr = ha->flt_region_vpd_sec << 2;

		mutex_lock(&ha->optrom_mutex);
		if (qla2x00_chip_is_down(vha)) {
			mutex_unlock(&ha->optrom_mutex);
			return -EAGAIN;
		}
		ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
		    ha->vpd_size);
		mutex_unlock(&ha->optrom_mutex);
	}
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
		return -EAGAIN;
	}

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
	if (!IS_FWI2_CAPABLE(ha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	}

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		mutex_unlock(&ha->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x706b,
		    "Unable to allocate memory for VPD information update.\n");
		return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);

	mutex_unlock(&ha->optrom_mutex);

	return count;
}

@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
		return 0;

	if (qla2x00_chip_is_down(vha))
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	rval = qla2x00_read_sfp_dev(vha, buf, count);
	mutex_unlock(&vha->hw->optrom_mutex);

	if (rval)
		return -EIO;

@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (qla2x00_chip_is_down(vha))
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	if (ha->xgmac_data)
		goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7076,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
@@ -806,6 +847,8 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);

	mutex_unlock(&vha->hw->optrom_mutex);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7077,
		    "Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,

	if (ha->dcbx_tlv)
		goto do_read;

	if (qla2x00_chip_is_down(vha))
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		return 0;
	}

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x7078,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return -ENOMEM;
@@ -859,6 +905,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);

	mutex_unlock(&vha->hw->optrom_mutex);

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7079,
		    "Unable to read DCBX TLV (%x).\n", rval);
@@ -1184,15 +1233,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x707a,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
@@ -1201,6 +1252,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
	if (rval != QLA_SUCCESS)
		count = 0;

	mutex_unlock(&vha->hw->optrom_mutex);

	return count;
}

@@ -1370,18 +1423,24 @@ qla2x00_thermal_temp_show(struct device *dev,
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	uint16_t temp = 0;
	int rc;

	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
		goto done;
	}

	if (vha->hw->flags.eeh_busy) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
		goto done;
	}

	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
	rc = qla2x00_get_thermal_temp(vha, &temp);
	mutex_unlock(&vha->hw->optrom_mutex);
	if (rc == QLA_SUCCESS)
		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);

done:
@@ -1402,13 +1461,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
	}

	if (qla2x00_chip_is_down(vha))
	mutex_lock(&vha->hw->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&vha->hw->optrom_mutex);
		ql_log(ql_log_warn, vha, 0x707c,
		    "ISP reset active.\n");
	else if (!vha->hw->flags.eeh_busy)
		goto out;
	} else if (vha->hw->flags.eeh_busy) {
		mutex_unlock(&vha->hw->optrom_mutex);
		goto out;
	}

	rval = qla2x00_get_firmware_state(vha, state);
	if (rval != QLA_SUCCESS)
	mutex_unlock(&vha->hw->optrom_mutex);
out:
	if (rval != QLA_SUCCESS) {
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
	}

	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
	    state[0], state[1], state[2], state[3], state[4], state[5]);
+30 −6
Original line number Diff line number Diff line
@@ -189,7 +189,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		goto premature_exit;
	}

	ha->flags.mbox_busy = 1;

	/* Save mailbox command for debug */
	ha->mcp = mcp;

@@ -198,12 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		rval = QLA_ABORTED;
		ha->flags.mbox_busy = 0;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		goto premature_exit;
	}
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	if (IS_P3P_TYPE(ha))
@@ -254,9 +255,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
				HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
					flags);
				ha->flags.mbox_busy = 0;

				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1010,
				    "Pending mailbox timeout, exiting.\n");
@@ -274,6 +276,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
		    mcp->tov * HZ)) {
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
				rval = QLA_ABORTED;
				goto premature_exit;
			}
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +294,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
			rval = QLA_ABORTED;
@@ -300,9 +314,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(&reg->isp82.hint) &
				HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
					flags);
				ha->flags.mbox_busy = 0;
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
@@ -320,7 +334,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				atomic_dec(&ha->num_pend_mbx_stage2);
				rval = QLA_ABORTED;
				goto premature_exit;
@@ -363,7 +380,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			ha->mcp = NULL;
@@ -436,7 +456,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
				 * then only PCI ERR flag would be set.
				 * we will do premature exit for above case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				    flags);
				rval = QLA_FUNCTION_TIMEOUT;
				goto premature_exit;
			}
@@ -451,8 +474,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
			rval = QLA_FUNCTION_TIMEOUT;
		 }
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	/* Clean up */
	ha->mcp = NULL;
+9 −16
Original line number Diff line number Diff line
@@ -1491,27 +1491,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
	struct qla_hw_data *ha = tgt->ha;
	unsigned long flags;

	mutex_lock(&ha->optrom_mutex);
	mutex_lock(&qla_tgt_mutex);
	if (!vha->fc_vport) {
		struct Scsi_Host *sh = vha->host;
		struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
		bool npiv_vports;

		spin_lock_irqsave(sh->host_lock, flags);
		npiv_vports = (fc_host->npiv_vports_inuse);
		spin_unlock_irqrestore(sh->host_lock, flags);

		if (npiv_vports) {
			mutex_unlock(&qla_tgt_mutex);
			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
			    "NPIV is in use. Can not stop target\n");
			return -EPERM;
		}
	}
	if (tgt->tgt_stop || tgt->tgt_stopped) {
		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
		    "Already in tgt->tgt_stop or tgt_stopped state\n");
		mutex_unlock(&qla_tgt_mutex);
		mutex_unlock(&ha->optrom_mutex);
		return -EPERM;
	}

@@ -1549,6 +1536,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)

	/* Wait for sessions to clear out (just in case) */
	wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
	mutex_unlock(&ha->optrom_mutex);

	return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -6595,6 +6584,9 @@ qlt_enable_vha(struct scsi_qla_host *vha)
	qlt_set_mode(vha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_lock(&ha->optrom_mutex);
	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
	    "%s.\n", __func__);
	if (vha->vp_idx) {
		qla24xx_disable_vp(vha);
		qla24xx_enable_vp(vha);
@@ -6603,6 +6595,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
		qla2xxx_wake_dpc(base_vha);
		qla2x00_wait_for_hba_online(base_vha);
	}
	mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);