Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d889344e authored by Sasikumar Chandrasekaran's avatar Sasikumar Chandrasekaran Committed by Martin K. Petersen
Browse files

scsi: megaraid_sas: Dynamic Raid Map Changes for SAS3.5 Generic Megaraid Controllers



SAS3.5 Generic Megaraid Controllers firmware will support a new dynamic RAID map, allowing different
map sizes for different numbers of supported VDs.

Signed-off-by: default avatarSasikumar Chandrasekaran <sasikumar.pc@broadcom.com>
Reviewed-by: default avatarTomas Henzl <thenzl@redhat.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent 69c337c0
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -1434,6 +1434,12 @@ enum FW_BOOT_CONTEXT {
#define MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT    14
#define MR_MAX_MSIX_REG_ARRAY                   16
#define MR_RDPQ_MODE_OFFSET			0X00800000

#define MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT	16
#define MR_MAX_RAID_MAP_SIZE_MASK		0x1FF
#define MR_MIN_MAP_SIZE				0x10000
/* 64k */

#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET		0X01000000

/*
@@ -2151,6 +2157,7 @@ struct megasas_instance {
	bool fw_sync_cache_support;
	bool is_ventura;
	bool msix_combined;
	u16 max_raid_mapsize;
};
struct MR_LD_VF_MAP {
	u32 size;
+43 −17
Original line number Diff line number Diff line
@@ -4424,8 +4424,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
static void megasas_update_ext_vd_details(struct megasas_instance *instance)
{
	struct fusion_context *fusion;
	u32 old_map_sz;
	u32 new_map_sz;
	u32 ventura_map_sz = 0;

	fusion = instance->ctrl_context;
	/* For MFI based controllers return dummy success */
@@ -4455,21 +4454,38 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
		"Legacy(64 VD) firmware");

	old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
	if (instance->max_raid_mapsize) {
		ventura_map_sz = instance->max_raid_mapsize *
						MR_MIN_MAP_SIZE; /* 64k */
		fusion->current_map_sz = ventura_map_sz;
		fusion->max_map_sz = ventura_map_sz;
	} else {
		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
					(sizeof(struct MR_LD_SPAN_MAP) *
					(instance->fw_supported_vd_count - 1));
	new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
				(sizeof(struct MR_LD_SPAN_MAP) *
				(instance->drv_supported_vd_count - 1));

	fusion->max_map_sz = max(old_map_sz, new_map_sz);
		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);

		fusion->max_map_sz =
			max(fusion->old_map_sz, fusion->new_map_sz);

		if (instance->supportmax256vd)
		fusion->current_map_sz = new_map_sz;
			fusion->current_map_sz = fusion->new_map_sz;
		else
		fusion->current_map_sz = old_map_sz;
			fusion->current_map_sz = fusion->old_map_sz;
	}
	/* irrespective of FW raid maps, driver raid map is constant */
	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);

#if VD_EXT_DEBUG
	dev_info(&instance->pdev->dev, "instance->max_raid_mapsize 0x%x\n ",
		instance->max_raid_mapsize);
	dev_info(&instance->pdev->dev, "new_map_sz = 0x%x, old_map_sz = 0x%x\n",
		fusion->new_map_sz, fusion->old_map_sz);
	dev_info(&instance->pdev->dev, "ventura_map_sz = 0x%x, current_map_sz = 0x%x\n",
		ventura_map_sz, fusion->current_map_sz);
	dev_info(&instance->pdev->dev, "fusion->drv_map_sz =0x%x, size of driver raid map 0x%lx\n",
		fusion->drv_map_sz, sizeof(struct MR_DRV_RAID_MAP_ALL));
#endif
}

/**
@@ -4996,7 +5012,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
{
	u32 max_sectors_1;
	u32 max_sectors_2;
	u32 tmp_sectors, msix_enable, scratch_pad_2;
	u32 tmp_sectors, msix_enable, scratch_pad_2, scratch_pad_3;
	resource_size_t base_addr;
	struct megasas_register_set __iomem *reg_set;
	struct megasas_ctrl_info *ctrl_info = NULL;
@@ -5072,7 +5088,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
			goto fail_ready_state;
	}


	if (instance->is_ventura) {
		scratch_pad_3 =
			readl(&instance->reg_set->outbound_scratch_pad_3);
#if VD_EXT_DEBUG
		dev_info(&instance->pdev->dev, "scratch_pad3 0x%x\n",
			scratch_pad_3);
#endif
		instance->max_raid_mapsize = ((scratch_pad_3 >>
			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
			MR_MAX_RAID_MAP_SIZE_MASK);
	}

	/* Check if MSI-X is supported while in ready state */
	msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
+261 −40
Original line number Diff line number Diff line
@@ -179,18 +179,204 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
	struct fusion_context *fusion = instance->ctrl_context;
	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
	int i;
	int i, j;
	u16 ld_count;
	struct MR_FW_RAID_MAP_DYNAMIC *fw_map_dyn;
	struct MR_FW_RAID_MAP_EXT *fw_map_ext;
	struct MR_RAID_MAP_DESC_TABLE *desc_table;


	struct MR_DRV_RAID_MAP_ALL *drv_map =
			fusion->ld_drv_map[(instance->map_id & 1)];
	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
	void *raid_map_data = NULL;

	memset(drv_map, 0, fusion->drv_map_sz);
	memset(pDrvRaidMap->ldTgtIdToLd,
		0xff, (sizeof(u16) * MAX_LOGICAL_DRIVES_DYN));

	if (instance->max_raid_mapsize) {
		fw_map_dyn = fusion->ld_map[(instance->map_id & 1)];
#if VD_EXT_DEBUG
		dev_dbg(&instance->pdev->dev, "raidMapSize 0x%x fw_map_dyn->descTableOffset 0x%x\n",
			le32_to_cpu(fw_map_dyn->raid_map_size),
			le32_to_cpu(fw_map_dyn->desc_table_offset));
		dev_dbg(&instance->pdev->dev, "descTableSize 0x%x descTableNumElements 0x%x\n",
			le32_to_cpu(fw_map_dyn->desc_table_size),
			le32_to_cpu(fw_map_dyn->desc_table_num_elements));
		dev_dbg(&instance->pdev->dev, "drv map %p ldCount %d\n",
			drv_map, fw_map_dyn->ld_count);
#endif
		desc_table =
		(struct MR_RAID_MAP_DESC_TABLE *)((void *)fw_map_dyn + le32_to_cpu(fw_map_dyn->desc_table_offset));
		if (desc_table != fw_map_dyn->raid_map_desc_table)
			dev_dbg(&instance->pdev->dev, "offsets of desc table are not matching desc %p original %p\n",
				desc_table, fw_map_dyn->raid_map_desc_table);

		ld_count = (u16)le16_to_cpu(fw_map_dyn->ld_count);
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec =
			fw_map_dyn->fp_pd_io_timeout_sec;
		pDrvRaidMap->totalSize = sizeof(struct MR_DRV_RAID_MAP_ALL);
		/* point to actual data starting point*/
		raid_map_data = (void *)fw_map_dyn +
			le32_to_cpu(fw_map_dyn->desc_table_offset) +
			le32_to_cpu(fw_map_dyn->desc_table_size);

		for (i = 0; i < le32_to_cpu(fw_map_dyn->desc_table_num_elements); ++i) {

#if VD_EXT_DEBUG
			dev_dbg(&instance->pdev->dev, "desc table %p\n",
				desc_table);
			dev_dbg(&instance->pdev->dev, "raidmap type %d, raidmapOffset 0x%x\n",
				desc_table->raid_map_desc_type,
				desc_table->raid_map_desc_offset);
			dev_dbg(&instance->pdev->dev, "raid map number of elements 0%x, raidmapsize 0x%x\n",
				desc_table->raid_map_desc_elements,
				desc_table->raid_map_desc_buffer_size);
#endif
			switch (le32_to_cpu(desc_table->raid_map_desc_type)) {
			case RAID_MAP_DESC_TYPE_DEVHDL_INFO:
				fw_map_dyn->dev_hndl_info =
				(struct MR_DEV_HANDLE_INFO *)(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
#if VD_EXT_DEBUG
				dev_dbg(&instance->pdev->dev, "devHndlInfo  address %p\n",
					fw_map_dyn->dev_hndl_info);
#endif
				memcpy(pDrvRaidMap->devHndlInfo,
				fw_map_dyn->dev_hndl_info,
				sizeof(struct MR_DEV_HANDLE_INFO) *
				le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_TGTID_INFO:
				fw_map_dyn->ld_tgt_id_to_ld =
				(u16 *) (raid_map_data +
				le32_to_cpu(desc_table->raid_map_desc_offset));
#if VD_EXT_DEBUG
			dev_dbg(&instance->pdev->dev, "ldTgtIdToLd  address %p\n",
				fw_map_dyn->ld_tgt_id_to_ld);
#endif
			for (j = 0; j < le32_to_cpu(desc_table->raid_map_desc_elements); j++) {
				pDrvRaidMap->ldTgtIdToLd[j] =
				fw_map_dyn->ld_tgt_id_to_ld[j];
#if VD_EXT_DEBUG
				dev_dbg(&instance->pdev->dev, " %d drv ldTgtIdToLd %d\n",
					j, pDrvRaidMap->ldTgtIdToLd[j]);
#endif
			}
			break;
			case RAID_MAP_DESC_TYPE_ARRAY_INFO:
				fw_map_dyn->ar_map_info =
				(struct MR_ARRAY_INFO *)
				(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
#if VD_EXT_DEBUG
				dev_dbg(&instance->pdev->dev, "arMapInfo  address %p\n",
					fw_map_dyn->ar_map_info);
#endif

				memcpy(pDrvRaidMap->arMapInfo,
				fw_map_dyn->ar_map_info,
				sizeof(struct MR_ARRAY_INFO) * le32_to_cpu(desc_table->raid_map_desc_elements));
			break;
			case RAID_MAP_DESC_TYPE_SPAN_INFO:
				fw_map_dyn->ld_span_map =
				(struct MR_LD_SPAN_MAP *)
				(raid_map_data + le32_to_cpu(desc_table->raid_map_desc_offset));
				memcpy(pDrvRaidMap->ldSpanMap,
				fw_map_dyn->ld_span_map,
				sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(desc_table->raid_map_desc_elements));
#if VD_EXT_DEBUG
				dev_dbg(&instance->pdev->dev, "ldSpanMap  address %p\n",
					fw_map_dyn->ld_span_map);
				dev_dbg(&instance->pdev->dev, "MR_LD_SPAN_MAP size 0x%lx\n",
					sizeof(struct MR_LD_SPAN_MAP));
				for (j = 0; j < ld_count; j++) {
					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_dyn->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
					j, j, fw_map_dyn->ld_span_map[j].ldRaid.targetId);
					dev_dbg(&instance->pdev->dev, "fw_map_dyn->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
					j, fw_map_dyn->ld_span_map[j].ldRaid.seqNum);
					dev_dbg(&instance->pdev->dev, "fw_map_dyn->ld_span_map[%d].ldRaid.rowSize 0x%x\n",
					j, (u32)fw_map_dyn->ld_span_map[j].ldRaid.rowSize);

					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
					j, j, pDrvRaidMap->ldSpanMap[j].ldRaid.targetId);
					dev_dbg(&instance->pdev->dev, "DrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
					j, pDrvRaidMap->ldSpanMap[j].ldRaid.seqNum);
					dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
					j, (u32)pDrvRaidMap->ldSpanMap[j].ldRaid.rowSize);

					dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
					instance->unique_id, drv_map);
					dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p/%p\n",
					pDrvRaidMap,
					&fw_map_dyn->ld_span_map[j].ldRaid,
					&pDrvRaidMap->ldSpanMap[j].ldRaid);
				}
#endif
			break;
			default:
				dev_dbg(&instance->pdev->dev, "wrong number of desctableElements %d\n",
					fw_map_dyn->desc_table_num_elements);
			}
			++desc_table;
		}

	} else if (instance->supportmax256vd) {
		fw_map_ext =
		(struct MR_FW_RAID_MAP_EXT *) fusion->ld_map[(instance->map_id & 1)];
		ld_count = (u16)le16_to_cpu(fw_map_ext->ldCount);
		if (ld_count > MAX_LOGICAL_DRIVES_EXT) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas: LD count exposed in RAID map in not valid\n");
			return;
		}
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) :Index 0x%x\n",
				instance->unique_id, i);
			dev_dbg(&instance->pdev->dev, "Target Id 0x%x\n",
				fw_map_ext->ldSpanMap[i].ldRaid.targetId);
			dev_dbg(&instance->pdev->dev, "Seq Num 0x%x Size 0/%llx\n",
				fw_map_ext->ldSpanMap[i].ldRaid.seqNum,
				fw_map_ext->ldSpanMap[i].ldRaid.size);
		}
#endif

		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = fw_map_ext->fpPdIoTimeoutSec;
		for (i = 0; i < (MAX_LOGICAL_DRIVES_EXT); i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u16)fw_map_ext->ldTgtIdToLd[i];
		memcpy(pDrvRaidMap->ldSpanMap, fw_map_ext->ldSpanMap,
				sizeof(struct MR_LD_SPAN_MAP) * ld_count);
#if VD_EXT_DEBUG
		for (i = 0; i < ld_count; i++) {
			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : fw_map_ext->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
			i, i, fw_map_ext->ldSpanMap[i].ldRaid.targetId);
			dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
			i, fw_map_ext->ldSpanMap[i].ldRaid.seqNum);
			dev_dbg(&instance->pdev->dev, "fw_map_ext->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
			i, (u32)fw_map_ext->ldSpanMap[i].ldRaid.rowSize);

			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x\n",
			i, i, pDrvRaidMap->ldSpanMap[i].ldRaid.targetId);
			dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x\n",
			i, pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum);
			dev_dbg(&instance->pdev->dev, "pDrvRaidMap->ldSpanMap[%d].ldRaid.rowSize 0x%x\n",
			i, (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);

			dev_dbg(&instance->pdev->dev, "megaraid_sas(%d) : drv raid map all %p\n",
			instance->unique_id, drv_map);
			dev_dbg(&instance->pdev->dev, "raid map %p LD RAID MAP %p %p\n",
			pDrvRaidMap, &fw_map_ext->ldSpanMap[i].ldRaid,
			&pDrvRaidMap->ldSpanMap[i].ldRaid);
		}
#endif
		memcpy(pDrvRaidMap->arMapInfo, fw_map_ext->arMapInfo,
			sizeof(struct MR_ARRAY_INFO) * MAX_API_ARRAYS_EXT);
		memcpy(pDrvRaidMap->devHndlInfo, fw_map_ext->devHndlInfo,
			sizeof(struct MR_DEV_HANDLE_INFO) *
					MAX_RAIDMAP_PHYSICAL_DEVICES);

	if (instance->supportmax256vd) {
		memcpy(fusion->ld_drv_map[instance->map_id & 1],
			fusion->ld_map[instance->map_id & 1],
			fusion->current_map_sz);
		/* New Raid map will not set totalSize, so keep expected value
		 * for legacy code in ValidateMapInfo
		 */
@@ -213,16 +399,12 @@ void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
		}
#endif

		memset(drv_map, 0, fusion->drv_map_sz);
		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
		pDrvRaidMap->ldCount = (__le16)cpu_to_le16(ld_count);
		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
			pDrvRaidMap->ldTgtIdToLd[i] =
				(u8)pFwRaidMap->ldTgtIdToLd[i];
		for (i = (MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS);
			i < MAX_LOGICAL_DRIVES_EXT; i++)
			pDrvRaidMap->ldTgtIdToLd[i] = 0xff;
		for (i = 0; i < ld_count; i++) {
			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
#if VD_EXT_DEBUG
@@ -279,7 +461,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
	lbInfo = fusion->load_balance_info;
	ldSpanInfo = fusion->log_to_span;

	if (instance->supportmax256vd)
	if (instance->max_raid_mapsize)
		expected_size = sizeof(struct MR_DRV_RAID_MAP_ALL);
	else if (instance->supportmax256vd)
		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
	else
		expected_size =
@@ -287,7 +471,9 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
			(sizeof(struct MR_LD_SPAN_MAP) * le16_to_cpu(pDrvRaidMap->ldCount)));

	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
		dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
		dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
			le32_to_cpu(pDrvRaidMap->totalSize));
		dev_dbg(&instance->pdev->dev, "is not matching expected size 0x%x\n",
			(unsigned int) expected_size);
		dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
@@ -787,7 +973,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
			((fusion->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			physArm = physArm + 1;
			pd = MR_ArPdGet(arRef, physArm, map);
@@ -797,9 +983,16 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
					physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	if (instance->is_ventura) {
		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	return retval;
}

@@ -891,7 +1084,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
			((fusion->adapter_type == THUNDERBOLT_SERIES)  ||
			((fusion->adapter_type == INVADER_SERIES) &&
			(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
			pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
		else if (raid->level == 1) {
			/* Get alternate Pd. */
			physArm = physArm + 1;
@@ -903,9 +1096,16 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
	}

	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
		physArm;
	io_info->span_arm = pRAID_Context->spanArm;
	if (instance->is_ventura) {
		((struct RAID_CONTEXT_G35 *) pRAID_Context)->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm =
				(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
	} else {
		pRAID_Context->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
		io_info->span_arm = pRAID_Context->span_arm;
	}
	return retval;
}

@@ -1109,20 +1309,20 @@ MR_BuildRaidContext(struct megasas_instance *instance,
			regSize += stripSize;
	}

	pRAID_Context->timeoutValue =
	pRAID_Context->timeout_value =
		cpu_to_le16(raid->fpIoTimeoutForLd ?
			    raid->fpIoTimeoutForLd :
			    map->raidMap.fpPdIoTimeoutSec);
	if (fusion->adapter_type == INVADER_SERIES)
		pRAID_Context->regLockFlags = (isRead) ?
		pRAID_Context->reg_lock_flags = (isRead) ?
			raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
	else
		pRAID_Context->regLockFlags = (isRead) ?
	else if (!instance->is_ventura)
		pRAID_Context->reg_lock_flags = (isRead) ?
			REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
	pRAID_Context->VirtualDiskTgtId = raid->targetId;
	pRAID_Context->regLockRowLBA    = cpu_to_le64(regStart);
	pRAID_Context->regLockLength    = cpu_to_le32(regSize);
	pRAID_Context->configSeqNum	= raid->seqNum;
	pRAID_Context->virtual_disk_tgt_id = raid->targetId;
	pRAID_Context->reg_lock_row_lba    = cpu_to_le64(regStart);
	pRAID_Context->reg_lock_length    = cpu_to_le32(regSize);
	pRAID_Context->config_seq_num	= raid->seqNum;
	/* save pointer to raid->LUN array */
	*raidLUN = raid->LUN;

@@ -1140,6 +1340,13 @@ MR_BuildRaidContext(struct megasas_instance *instance,
		/* If IO on an invalid Pd, then FP is not possible.*/
		if (io_info->devHandle == cpu_to_le16(MR_PD_INVALID))
			io_info->fpOkForIo = FALSE;
		/* if FP possible, set the SLUD bit in
		 *  regLockFlags for ventura
		 */
		else if ((instance->is_ventura) && (!isRead) &&
			(raid->writeMode == MR_RL_WRITE_BACK_MODE) &&
			(raid->capability.fp_cache_bypass_capable))
			((struct RAID_CONTEXT_G35 *) pRAID_Context)->routing_flags.bits.sld = 1;
		/* set raid 1/10 fast path write capable bit in io_info */
		if (io_info->fpOkForIo &&
		    (io_info->r1_alt_dev_handle != MR_PD_INVALID) &&
@@ -1319,6 +1526,7 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
	struct fusion_context *fusion;
	struct MR_LD_RAID  *raid;
	struct MR_DRV_RAID_MAP_ALL *drv_map;
	u16	pd1_dev_handle;
	u16     pend0, pend1, ld;
	u64     diff0, diff1;
	u8      bestArm, pd0, pd1, span, arm;
@@ -1344,6 +1552,13 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
		(arm + 1 - span_row_size) : arm + 1, drv_map);

	/* Get PD1 Dev Handle */

	pd1_dev_handle = MR_PdDevHandleGet(pd1, drv_map);

	if (pd1_dev_handle == MR_PD_INVALID) {
		bestArm = arm;
	} else {
		/* get the pending cmds for the data and mirror arms */
		pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
		pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
@@ -1353,14 +1568,20 @@ u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
		diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
		bestArm = (diff0 <= diff1 ? arm : arm ^ 1);

		/* Make balance count from 16 to 4 to
		 *  keep driver in sync with Firmware
		 */
		if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
			(bestArm != arm && pend1 > pend0 + lb_pending_cmds))
			bestArm ^= 1;

		/* Update the last accessed block on the correct pd */
		io_info->span_arm =
			(span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
		io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
	}

	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
#if SPAN_DEBUG
	if (arm != bestArm)
		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+178 −47

File changed.

Preview size limit exceeded, changes collapsed.

+206 −34

File changed.

Preview size limit exceeded, changes collapsed.