Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2e90f4b5 authored by James Smart's avatar James Smart Committed by James Bottomley
Browse files

[SCSI] lpfc 8.3.28: Critical Miscellaneous fixes



- Make lpfc_sli4_pci_mem_unset interface type aware (CR 124390)
- Convert byte count to word count when calling __iowrite32_copy (CR 122550)
- Check the ERR1 and ERR2 registers for error attention when the SLI
  Port state is affected by a forced debug dump. (CR 122986, 122426, 124859)
- Use the lpfc_readl routine instead of readl for the port status
  register read in lpfc_handle_eratt_s4 (CR 125403)
- Call lpfc_sli4_queue_destroy inside of lpfc_sli4_brdreset before doing
  a pci function reset (CR 125124, 125168, 125572, 125622)
- Zero out the HBQ when it is allocated (CR 125663)
- Alter port reset log messages to indicate error type (CR 125989)
- Add proper NULL pointer checks to all the places that access
  the queue memory (CR 125832)

Signed-off-by: default avatarAlex Iannicelli <alex.iannicelli@emulex.com>
Signed-off-by: default avatarJames Smart <james.smart@emulex.com>
Signed-off-by: default avatarJames Bottomley <JBottomley@Parallels.com>
parent df9e1b59
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
/**
 * lpfc_memcpy_to_slim - Copy a buffer into SLIM memory-mapped I/O space.
 * @dest:  __iomem destination address in SLIM.
 * @src:   source buffer in regular kernel memory.
 * @bytes: number of bytes to copy; expected to be a multiple of 4.
 *
 * __iowrite32_copy() takes a count of 32-bit words, not bytes, so the
 * byte count must be converted before the call (any trailing remainder
 * of bytes % 4 is not copied).
 */
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
	/* convert bytes in argument list to word count for copy function */
	__iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}

static inline void
+113 −59
Original line number Diff line number Diff line
@@ -1997,6 +1997,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
	/* Get slow-path event queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path EQ information:\n");
	if (phba->sli4_hba.sp_eq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"\tEQID[%02d], "
			"QE-COUNT[%04d], QE-SIZE[%04d], "
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.sp_eq->entry_size,
			phba->sli4_hba.sp_eq->host_index,
			phba->sli4_hba.sp_eq->hba_index);
	}

	/* Get fast-path event queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Fast-path EQ information:\n");
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
	if (phba->sli4_hba.fp_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
		     fcp_qidx++) {
			if (phba->sli4_hba.fp_eq[fcp_qidx]) {
				len += snprintf(pbuffer+len,
					LPFC_QUE_INFO_GET_BUF_SIZE-len,
				"\tEQID[%02d], "
				"QE-COUNT[%04d], QE-SIZE[%04d], "
				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2021,11 +2027,14 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
				phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
				phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
			}
		}
	}
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");

	/* Get mailbox complete queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path MBX CQ information:\n");
	if (phba->sli4_hba.mbx_cq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Associated EQID[%02d]:\n",
			phba->sli4_hba.mbx_cq->assoc_qid);
@@ -2038,10 +2047,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.mbx_cq->entry_size,
			phba->sli4_hba.mbx_cq->host_index,
			phba->sli4_hba.mbx_cq->hba_index);
	}

	/* Get slow-path complete queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path ELS CQ information:\n");
	if (phba->sli4_hba.els_cq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Associated EQID[%02d]:\n",
			phba->sli4_hba.els_cq->assoc_qid);
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.els_cq->entry_size,
			phba->sli4_hba.els_cq->host_index,
			phba->sli4_hba.els_cq->hba_index);
	}

	/* Get fast-path complete queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Fast-path FCP CQ information:\n");
	fcp_qidx = 0;
	if (phba->sli4_hba.fcp_cq) {
		do {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
				len += snprintf(pbuffer+len,
					LPFC_QUE_INFO_GET_BUF_SIZE-len,
				"Associated EQID[%02d]:\n",
				phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
				len += snprintf(pbuffer+len,
					LPFC_QUE_INFO_GET_BUF_SIZE-len,
				"\tCQID[%02d], "
				"QE-COUNT[%04d], QE-SIZE[%04d], "
				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,12 +2088,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
				phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
				phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
				phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
			}
		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
		len += snprintf(pbuffer+len,
				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
	}

	/* Get mailbox queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path MBX MQ information:\n");
	if (phba->sli4_hba.mbx_wq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Associated CQID[%02d]:\n",
			phba->sli4_hba.mbx_wq->assoc_qid);
@@ -2090,10 +2110,12 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.mbx_wq->entry_size,
			phba->sli4_hba.mbx_wq->host_index,
			phba->sli4_hba.mbx_wq->hba_index);
	}

	/* Get slow-path work queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path ELS WQ information:\n");
	if (phba->sli4_hba.els_wq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Associated CQID[%02d]:\n",
			phba->sli4_hba.els_wq->assoc_qid);
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.els_wq->entry_size,
			phba->sli4_hba.els_wq->host_index,
			phba->sli4_hba.els_wq->hba_index);
	}

	/* Get fast-path work queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Fast-path FCP WQ information:\n");
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
	if (phba->sli4_hba.fcp_wq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
		     fcp_qidx++) {
			if (!phba->sli4_hba.fcp_wq[fcp_qidx])
				continue;
			len += snprintf(pbuffer+len,
					LPFC_QUE_INFO_GET_BUF_SIZE-len,
				"Associated CQID[%02d]:\n",
				phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			len += snprintf(pbuffer+len,
					LPFC_QUE_INFO_GET_BUF_SIZE-len,
				"\tWQID[%02d], "
				"QE-COUNT[%04d], WQE-SIZE[%04d], "
				"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2124,11 +2153,14 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
				phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
				phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
		}
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
		len += snprintf(pbuffer+len,
				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
	}

	/* Get receive queue information */
	len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Slow-path RQ information:\n");
	if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
		len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
			"Associated CQID[%02d]:\n",
			phba->sli4_hba.hdr_rq->assoc_qid);
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
			phba->sli4_hba.dat_rq->entry_size,
			phba->sli4_hba.dat_rq->host_index,
			phba->sli4_hba.dat_rq->hba_index);

	}
	return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}

@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
	switch (quetp) {
	case LPFC_IDIAG_EQ:
		/* Slow-path event queue */
		if (phba->sli4_hba.sp_eq->queue_id == queid) {
		if (phba->sli4_hba.sp_eq &&
		    phba->sli4_hba.sp_eq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			goto pass_check;
		}
		/* Fast-path event queue */
		if (phba->sli4_hba.fp_eq) {
			for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
			if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
				if (phba->sli4_hba.fp_eq[qidx] &&
				    phba->sli4_hba.fp_eq[qidx]->queue_id ==
				    queid) {
					/* Sanity check */
					rc = lpfc_idiag_que_param_check(
						phba->sli4_hba.fp_eq[qidx],
						index, count);
					if (rc)
						goto error_out;
				idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
					idiag.ptr_private =
						phba->sli4_hba.fp_eq[qidx];
					goto pass_check;
				}
			}
		}
		goto error_out;
		break;
	case LPFC_IDIAG_CQ:
		/* MBX complete queue */
		if (phba->sli4_hba.mbx_cq->queue_id == queid) {
		if (phba->sli4_hba.mbx_cq &&
		    phba->sli4_hba.mbx_cq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			goto pass_check;
		}
		/* ELS complete queue */
		if (phba->sli4_hba.els_cq->queue_id == queid) {
		if (phba->sli4_hba.els_cq &&
		    phba->sli4_hba.els_cq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.els_cq, index, count);
@@ -2406,9 +2446,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			goto pass_check;
		}
		/* FCP complete queue */
		if (phba->sli4_hba.fcp_cq) {
			qidx = 0;
			do {
			if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
				if (phba->sli4_hba.fcp_cq[qidx] &&
				    phba->sli4_hba.fcp_cq[qidx]->queue_id ==
				    queid) {
					/* Sanity check */
					rc = lpfc_idiag_que_param_check(
						phba->sli4_hba.fcp_cq[qidx],
@@ -2420,11 +2463,13 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
					goto pass_check;
				}
			} while (++qidx < phba->cfg_fcp_eq_count);
		}
		goto error_out;
		break;
	case LPFC_IDIAG_MQ:
		/* MBX work queue */
		if (phba->sli4_hba.mbx_wq->queue_id == queid) {
		if (phba->sli4_hba.mbx_wq &&
		    phba->sli4_hba.mbx_wq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			idiag.ptr_private = phba->sli4_hba.mbx_wq;
			goto pass_check;
		}
		goto error_out;
		break;
	case LPFC_IDIAG_WQ:
		/* ELS work queue */
		if (phba->sli4_hba.els_wq->queue_id == queid) {
		if (phba->sli4_hba.els_wq &&
		    phba->sli4_hba.els_wq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.els_wq, index, count);
@@ -2446,8 +2493,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			goto pass_check;
		}
		/* FCP work queue */
		if (phba->sli4_hba.fcp_wq) {
			for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
			if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
				if (!phba->sli4_hba.fcp_wq[qidx])
					continue;
				if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
				    queid) {
					/* Sanity check */
					rc = lpfc_idiag_que_param_check(
						phba->sli4_hba.fcp_wq[qidx],
@@ -2459,11 +2510,13 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
					goto pass_check;
				}
			}
		}
		goto error_out;
		break;
	case LPFC_IDIAG_RQ:
		/* HDR queue */
		if (phba->sli4_hba.hdr_rq->queue_id == queid) {
		if (phba->sli4_hba.hdr_rq &&
		    phba->sli4_hba.hdr_rq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
			goto pass_check;
		}
		/* DAT queue */
		if (phba->sli4_hba.dat_rq->queue_id == queid) {
		if (phba->sli4_hba.dat_rq &&
		    phba->sli4_hba.dat_rq->queue_id == queid) {
			/* Sanity check */
			rc = lpfc_idiag_que_param_check(
					phba->sli4_hba.dat_rq, index, count);
+116 −48
Original line number Diff line number Diff line
@@ -1417,7 +1417,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t pci_rd_rc1, pci_rd_rc2;
	int rc;

	/* If the pci channel is offline, ignore possible errors, since
@@ -1429,27 +1432,29 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	/* For now, the actual action for SLI4 device handling is not
	 * specified yet, just treated it as adaptor hardware failure
	 */
	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		lpfc_sli4_offline_eratt(phba);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		portstat_reg.word0 =
			readl(phba->sli4_hba.u.if_type2.STATUSregaddr);

		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO)
			return;
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			/* TODO: Register for Overtemp async events. */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1459,8 +1464,20 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
			break;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3143 Port Down: Firmware Restarted\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3145 Port Down: Provisioning\n");
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
@@ -1469,14 +1486,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
		if (!rc) {
			/* need reset: attempt for port recovery */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2887 Port Error: Attempting "
					"Port Recovery\n");
					"2887 Reset Needed: Attempting Port "
					"Recovery...\n");
			lpfc_offline_prep(phba);
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			if (lpfc_online(phba) == 0) {
				lpfc_unblock_mgmt_io(phba);
				/* don't report event on forced debug dump */
				if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
				    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
					return;
				else
					break;
			}
			/* fall through for not able to recover */
		}
@@ -1486,6 +1508,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
@@ -6475,6 +6507,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
		phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;
out_free_els_wq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
	phba->sli4_hba.els_wq = NULL;
@@ -6487,6 +6520,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
		phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
	}
	kfree(phba->sli4_hba.fcp_cq);
	phba->sli4_hba.fcp_cq = NULL;
out_free_els_cq:
	lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
	phba->sli4_hba.els_cq = NULL;
@@ -6499,6 +6533,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
		phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
	}
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;
out_free_sp_eq:
	lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
	phba->sli4_hba.sp_eq = NULL;
@@ -6532,7 +6567,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
	phba->sli4_hba.els_wq = NULL;

	/* Release FCP work queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
	if (phba->sli4_hba.fcp_wq != NULL)
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
		     fcp_qidx++)
			lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
	kfree(phba->sli4_hba.fcp_wq);
	phba->sli4_hba.fcp_wq = NULL;
@@ -6553,6 +6590,7 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)

	/* Release FCP response complete queue */
	fcp_qidx = 0;
	if (phba->sli4_hba.fcp_cq != NULL)
		do
			lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
		while (++fcp_qidx < phba->cfg_fcp_eq_count);
@@ -6560,7 +6598,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
	phba->sli4_hba.fcp_cq = NULL;

	/* Release fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
	if (phba->sli4_hba.fp_eq != NULL)
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
		     fcp_qidx++)
			lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
	kfree(phba->sli4_hba.fp_eq);
	phba->sli4_hba.fp_eq = NULL;
@@ -6614,6 +6654,11 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path event queue */
	if (!phba->sli4_hba.fp_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		goto out_destroy_sp_eq;
	}
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6678,6 +6723,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
			phba->sli4_hba.sp_eq->queue_id);

	/* Set up fast-path FCP Response Complete Queue */
	if (!phba->sli4_hba.fcp_cq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3148 Fast-path FCP CQ array not "
				"allocated\n");
		goto out_destroy_els_cq;
	}
	fcp_cqidx = 0;
	do {
		if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
@@ -6757,6 +6808,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
			phba->sli4_hba.els_cq->queue_id);

	/* Set up fast-path FCP Work Queue */
	if (!phba->sli4_hba.fcp_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3149 Fast-path FCP WQ array not "
				"allocated\n");
		goto out_destroy_els_wq;
	}
	for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
		if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -6818,18 +6875,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
out_destroy_fcp_wq:
	for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
		lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
out_destroy_els_wq:
	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
	for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
		lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
out_destroy_els_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
	lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
	for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
		lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
out_destroy_sp_eq:
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
	return rc;
@@ -6866,13 +6926,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
	/* Unset ELS complete queue */
	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq) {
		fcp_qidx = 0;
		do {
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
		} while (++fcp_qidx < phba->cfg_fcp_eq_count);
	}
	/* Unset fast-path event queue */
	for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
	if (phba->sli4_hba.fp_eq) {
		for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
		     fcp_qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
	}
	/* Unset slow-path event queue */
	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
@@ -7411,22 +7476,25 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
/**
 * lpfc_sli4_pci_mem_unset - Unmap SLI4 PCI memory-mapped register spaces.
 * @phba: pointer to the lpfc HBA data structure.
 *
 * Interface-type aware unmap: IF_TYPE_0 devices map three separate BARs
 * (doorbell, control and configuration register spaces) while IF_TYPE_2
 * devices map only the configuration register space.  IF_TYPE_1 is not
 * supported by this driver, so it falls through to the error report.
 */
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	/* Unmap I/O memory space */
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
+2 −2
Original line number Diff line number Diff line
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *hbqbp;

	hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!hbqbp)
		return NULL;

@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
	struct hbq_dmabuf *dma_buf;

	dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
	if (!dma_buf)
		return NULL;

+107 −28

File changed.

Preview size limit exceeded, changes collapsed.

Loading