Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2f5a3145 authored by James Bottomley
Browse files

Merge remote-tracking branch 'mkp-scsi/4.10/scsi-fixes' into fixes

parents a47fff10 cd60be49
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
+13 −5
Original line number Diff line number Diff line
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
	struct qla_hw_data *ha = vha->hw;
	ssize_t rval = 0;

	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SREADING)
		return 0;
		goto out;

	mutex_lock(&ha->optrom_mutex);
	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);

out:
	mutex_unlock(&ha->optrom_mutex);

	return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SWRITING)
	mutex_lock(&ha->optrom_mutex);

	if (ha->optrom_state != QLA_SWRITING) {
		mutex_unlock(&ha->optrom_mutex);
		return -EINVAL;
	if (off > ha->optrom_region_size)
	}
	if (off > ha->optrom_region_size) {
		mutex_unlock(&ha->optrom_mutex);
		return -ERANGE;
	}
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	mutex_lock(&ha->optrom_mutex);
	memcpy(&ha->optrom_buffer[off], buf, count);
	mutex_unlock(&ha->optrom_mutex);

+1 −2
Original line number Diff line number Diff line
@@ -2732,7 +2732,7 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m)	(((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m)	(QLA_MSIX_FW_MODE(m) == 1)

#define QLA_MSIX_DEFAULT		0x00
#define QLA_BASE_VECTORS	2 /* default + RSP */
#define QLA_MSIX_RSP_Q			0x01
#define QLA_ATIO_VECTOR		0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q	0x03
@@ -2754,7 +2754,6 @@ struct qla_msix_entry {
	uint16_t entry;
	char name[30];
	void *handle;
	struct irq_affinity_notify irq_notify;
	int cpuid;
};

+11 −77
Original line number Diff line number Diff line
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla_irq_affinity_notify(struct irq_affinity_notify *,
    const cpumask_t *);
static void qla_irq_affinity_release(struct kref *);


/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2572,14 +2568,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	if (!vha->flags.online)
		return;

	if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
		/* if kernel does not notify qla of IRQ's CPU change,
		 * then set it here.
		 */
		rsp->msix->cpuid = smp_processor_id();
		ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
	}

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

@@ -3018,13 +3006,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
		desc.pre_vectors++;

	ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
			ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
			&desc);

	ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
				    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
@@ -3069,13 +3064,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
		qentry->irq_notify.notify  = qla_irq_affinity_notify;
		qentry->irq_notify.release = qla_irq_affinity_release;
		qentry->cpuid = -1;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
@@ -3093,18 +3085,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;

		/* Register for CPU affinity notification. */
		irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);

		/* Schedule work (ie. trigger a notification) to read cpu
		 * mask for this specific irq.
		 * kref_get is required because
		* irq_affinity_notify() will do
		* kref_put().
		*/
		kref_get(&qentry->irq_notify.kref);
		schedule_work(&qentry->irq_notify.work);
	}

	/*
@@ -3301,49 +3281,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	msix->handle = qpair;
	return ret;
}


/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
	const cpumask_t *mask)
{
	struct qla_msix_entry *qentry =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct rsp_que *rsp = qentry->handle;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* user is recommended to set mask to just 1 cpu */
	qentry->cpuid = cpumask_first(mask);

	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host %ld : vector %d cpu %d \n", __func__,
	    base_vha->host_no, qentry->vector, qentry->cpuid);

	/*
	 * Only a live IRQ on the 83xx/27xx response-queue vector updates the
	 * cached target-mode cpuid.
	 */
	if (qentry->have_irq &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
	    qentry->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER) {
		ha->tgt.rspq_vector_cpuid = qentry->cpuid;
		ql_dbg(ql_dbg_init, base_vha, 0xffff,
		    "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
		    __func__, base_vha->host_no, qentry->vector,
		    qentry->cpuid);
	}
}

/* kref release callback for the affinity notifier; trace-only, no teardown. */
static void qla_irq_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct qla_msix_entry *qentry =
		container_of(notify, struct qla_msix_entry, irq_notify);
	struct rsp_que *rsp = qentry->handle;
	struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);

	/* Log the final vector/cpu binding for debugging purposes. */
	ql_dbg(ql_dbg_init, base_vha, 0xffff,
	    "%s: host%ld: vector %d cpu %d\n", __func__,
	    base_vha->host_no, qentry->vector, qentry->cpuid);
}
+1 −1
Original line number Diff line number Diff line
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
			continue;

		rsp = ha->rsp_q_map[cnt];
		clear_bit(cnt, ha->req_qid_map);
		clear_bit(cnt, ha->rsp_qid_map);
		ha->rsp_q_map[cnt] =  NULL;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		qla2x00_free_rsp_que(ha, rsp);