Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 95422dec authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull SCSI fixes from James Bottomley:
 "This is a rather large set of fixes. The bulk are for lpfc correcting
  a lot of issues in the new NVME driver code which just went in in the
  merge window.

  The others are:

   - fix a hang in the vmware paravirt driver caused by incorrect
     handling of the new MSI vector allocation

   - long standing bug in storvsc, which recent block changes turned
     from being a harmless annoyance into a hang

   - yet more fallout (in mpt3sas) from the changes to device blocking

  The remainder are small fixes and updates"

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (34 commits)
  scsi: lpfc: Add shutdown method for kexec
  scsi: storvsc: Workaround for virtual DVD SCSI version
  scsi: lpfc: revise version number to 11.2.0.10
  scsi: lpfc: code cleanups in NVME initiator discovery
  scsi: lpfc: code cleanups in NVME initiator base
  scsi: lpfc: correct rdp diag portnames
  scsi: lpfc: remove dead sli3 nvme code
  scsi: lpfc: correct double print
  scsi: lpfc: Rename LPFC_MAX_EQ_DELAY to LPFC_MAX_EQ_DELAY_EQID_CNT
  scsi: lpfc: Rework lpfc Kconfig for NVME options
  scsi: lpfc: add transport eh_timed_out reference
  scsi: lpfc: Fix eh_deadline setting for sli3 adapters.
  scsi: lpfc: add NVME exchange aborts
  scsi: lpfc: Fix nvme allocation bug on failed nvme_fc_register_localport
  scsi: lpfc: Fix IO submission if WQ is full
  scsi: lpfc: Fix NVME CMD IU byte swapped word 1 problem
  scsi: lpfc: Fix RCTL value on NVME LS request and response
  scsi: lpfc: Fix crash during Hardware error recovery on SLI3 adapters
  scsi: lpfc: fix missing spin_unlock on sql_list_lock
  scsi: lpfc: don't dereference dma_buf->iocbq before null check
  ...
parents aabcf5fc a11be42a
Loading
Loading
Loading
Loading
+16 −3
Original line number Diff line number Diff line
@@ -1241,19 +1241,32 @@ config SCSI_LPFC
	tristate "Emulex LightPulse Fibre Channel Support"
	depends on PCI && SCSI
	depends on SCSI_FC_ATTRS
	depends on NVME_FC && NVME_TARGET_FC
	select CRC_T10DIF
	help
	---help---
          This lpfc driver supports the Emulex LightPulse
          Family of Fibre Channel PCI host adapters.

config SCSI_LPFC_DEBUG_FS
	bool "Emulex LightPulse Fibre Channel debugfs Support"
	depends on SCSI_LPFC && DEBUG_FS
	help
	---help---
	  This makes debugging information from the lpfc driver
	  available via the debugfs filesystem.

config LPFC_NVME_INITIATOR
	bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
	depends on SCSI_LPFC && NVME_FC
	---help---
	  This enables NVME Initiator support in the Emulex lpfc driver.

config LPFC_NVME_TARGET
	bool "Emulex LightPulse Fibre Channel NVME Target Support"
	depends on SCSI_LPFC && NVME_TARGET_FC
	---help---
	  This enables NVME Target support in the Emulex lpfc driver.
	  Target enablement must still be enabled on a per adapter
	  basis by module parameters.

config SCSI_SIM710
	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
	depends on (EISA || MCA) && SCSI
+1 −1
Original line number Diff line number Diff line
@@ -468,7 +468,7 @@ static int aac_src_check_health(struct aac_dev *dev)
	return -1;

err_blink:
	return (status > 16) & 0xFF;
	return (status >> 16) & 0xFF;
}

static inline u32 aac_get_vector(struct aac_dev *dev)
+25 −1
Original line number Diff line number Diff line
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
	task->state = state;

	if (!list_empty(&task->running))
	spin_lock_bh(&conn->taskqueuelock);
	if (!list_empty(&task->running)) {
		pr_debug_once("%s while task on list", __func__);
		list_del_init(&task->running);
	}
	spin_unlock_bh(&conn->taskqueuelock);

	if (conn->task == task)
		conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
		if (session->tt->xmit_task(task))
			goto free_task;
	} else {
		spin_lock_bh(&conn->taskqueuelock);
		list_add_tail(&task->running, &conn->mgmtqueue);
		spin_unlock_bh(&conn->taskqueuelock);
		iscsi_conn_queue_work(conn);
	}

@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
	 * this may be on the requeue list already if the xmit_task callout
	 * is handling the r2ts while we are adding new ones
	 */
	spin_lock_bh(&conn->taskqueuelock);
	if (list_empty(&task->running))
		list_add_tail(&task->running, &conn->requeue);
	spin_unlock_bh(&conn->taskqueuelock);
	iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
	 * only have one nop-out as a ping from us and targets should not
	 * overflow us with nop-ins
	 */
	spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
	while (!list_empty(&conn->mgmtqueue)) {
		conn->task = list_entry(conn->mgmtqueue.next,
					 struct iscsi_task, running);
		list_del_init(&conn->task->running);
		spin_unlock_bh(&conn->taskqueuelock);
		if (iscsi_prep_mgmt_task(conn, conn->task)) {
			/* regular RX path uses back_lock */
			spin_lock_bh(&conn->session->back_lock);
			__iscsi_put_task(conn->task);
			spin_unlock_bh(&conn->session->back_lock);
			conn->task = NULL;
			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
		spin_lock_bh(&conn->taskqueuelock);
	}

	/* process pending command queue */
@@ -1536,19 +1548,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
					running);
		list_del_init(&conn->task->running);
		spin_unlock_bh(&conn->taskqueuelock);
		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
			fail_scsi_task(conn->task, DID_IMM_RETRY);
			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
		if (rc) {
			if (rc == -ENOMEM || rc == -EACCES) {
				spin_lock_bh(&conn->taskqueuelock);
				list_add_tail(&conn->task->running,
					      &conn->cmdqueue);
				conn->task = NULL;
				spin_unlock_bh(&conn->taskqueuelock);
				goto done;
			} else
				fail_scsi_task(conn->task, DID_ABORT);
			spin_lock_bh(&conn->taskqueuelock);
			continue;
		}
		rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		 * we need to check the mgmt queue for nops that need to
	 * be sent to avoid starvation
		 */
		spin_lock_bh(&conn->taskqueuelock);
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
@@ -1578,12 +1596,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
		conn->task = task;
		list_del_init(&conn->task->running);
		conn->task->state = ISCSI_TASK_RUNNING;
		spin_unlock_bh(&conn->taskqueuelock);
		rc = iscsi_xmit_task(conn);
		if (rc)
			goto done;
		spin_lock_bh(&conn->taskqueuelock);
		if (!list_empty(&conn->mgmtqueue))
			goto check_mgmt;
	}
	spin_unlock_bh(&conn->taskqueuelock);
	spin_unlock_bh(&conn->session->frwd_lock);
	return -ENODATA;

@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
			goto prepd_reject;
		}
	} else {
		spin_lock_bh(&conn->taskqueuelock);
		list_add_tail(&task->running, &conn->cmdqueue);
		spin_unlock_bh(&conn->taskqueuelock);
		iscsi_conn_queue_work(conn);
	}

@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
	INIT_LIST_HEAD(&conn->mgmtqueue);
	INIT_LIST_HEAD(&conn->cmdqueue);
	INIT_LIST_HEAD(&conn->requeue);
	spin_lock_init(&conn->taskqueuelock);
	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);

	/* allocate login_task used for the login/text sequences */
+3 −1
Original line number Diff line number Diff line
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
#define FC_MAX_ADPTMSG		64

#define MAX_HBAEVT	32
#define MAX_HBAS_NO_RESET 16

/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS	2

/* lpfc wait event data ready flag */
#define LPFC_DATA_READY		(1<<0)
#define LPFC_DATA_READY		0	/* bit 0 */

/* queue dump line buffer size */
#define LPFC_LBUF_SZ		128
@@ -692,6 +693,7 @@ struct lpfc_hba {
					 * capability
					 */
#define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
#define NVME_XRI_ABORT_EVENT	0x100000

	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
	struct lpfc_dmabuf slim2p;
+8 −1
Original line number Diff line number Diff line
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
		   lpfc_poll_show, lpfc_poll_store);

int lpfc_no_hba_reset_cnt;
unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");

LPFC_ATTR(sli_mode, 0, 0, 3,
	"SLI mode selector:"
	" 0 - auto (SLI-3 if supported),"
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
		return -EINVAL;

	phba->cfg_fcp_imax = (uint32_t)val;
	for (i = 0; i < phba->io_channel_irqs; i++)

	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, i);

	return strlen(buf);
Loading