Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e78192f7 authored by qctecmdr, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mhi: core: move certain logs to controller log buffer"

parents a9e3b4c7 6a004f47
Loading
Loading
Loading
Loading
+49 −37
Original line number Diff line number Diff line
@@ -49,16 +49,18 @@ struct arch_info {
#define DLOG "Dev->Host: "
#define HLOG "Host: "

#define MHI_TSYNC_LOG_PAGES (10)
#define MHI_TSYNC_LOG_PAGES (2)

#ifdef CONFIG_MHI_DEBUG

#define MHI_IPC_LOG_PAGES (100)
#define MHI_CNTRL_LOG_PAGES (25)
enum MHI_DEBUG_LEVEL  mhi_ipc_log_lvl = MHI_MSG_LVL_VERBOSE;

#else

#define MHI_IPC_LOG_PAGES (10)
#define MHI_CNTRL_LOG_PAGES (5)
enum MHI_DEBUG_LEVEL  mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;

#endif
@@ -153,7 +155,7 @@ static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)

	switch (notify->event) {
	case MSM_PCIE_EVENT_WAKEUP:
		MHI_LOG("Received MSM_PCIE_EVENT_WAKE signal\n");
		MHI_CNTRL_LOG("Received PCIE_WAKE signal\n");

		/* bring link out of d3cold */
		if (mhi_dev->powered_on) {
@@ -162,13 +164,13 @@ static void mhi_arch_pci_link_state_cb(struct msm_pcie_notify *notify)
		}
		break;
	case MSM_PCIE_EVENT_L1SS_TIMEOUT:
		MHI_VERB("Received MSM_PCIE_EVENT_L1SS_TIMEOUT signal\n");
		MHI_VERB("Received PCIE_L1SS_TIMEOUT signal\n");

		pm_runtime_mark_last_busy(&pci_dev->dev);
		pm_request_autosuspend(&pci_dev->dev);
		break;
	default:
		MHI_ERR("Unhandled event 0x%x\n", notify->event);
		MHI_CNTRL_LOG("Unhandled event 0x%x\n", notify->event);
	}
}

@@ -181,12 +183,12 @@ static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)

	mutex_lock(&mhi_cntrl->pm_mutex);
	if (mhi_dev->powered_on) {
		MHI_LOG("MHI still in active state\n");
		MHI_CNTRL_LOG("MHI still in active state\n");
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return 0;
	}

	MHI_LOG("Enter\n");
	MHI_CNTRL_LOG("Enter: mdm_crashed:%d\n", flags & ESOC_HOOK_MDM_CRASH);

	/* reset rpm state */
	pm_runtime_set_active(&pci_dev->dev);
@@ -195,7 +197,7 @@ static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
	pm_runtime_forbid(&pci_dev->dev);
	ret = pm_runtime_get_sync(&pci_dev->dev);
	if (ret < 0) {
		MHI_ERR("Error with rpm resume, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error with rpm resume, ret:%d\n", ret);
		return ret;
	}

@@ -203,7 +205,7 @@ static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
	ret = msm_pcie_pm_control(MSM_PCIE_RESUME, pci_dev->bus->number,
				  pci_dev, NULL, 0);
	if (ret) {
		MHI_ERR("Failed to resume pcie bus ret %d\n", ret);
		MHI_CNTRL_ERR("Failed to resume pcie bus ret %d\n", ret);
		return ret;
	}

@@ -216,7 +218,7 @@ static void mhi_arch_link_off(struct mhi_controller *mhi_cntrl)
	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
	struct pci_dev *pci_dev = mhi_dev->pci_dev;

	MHI_LOG("Entered\n");
	MHI_CNTRL_LOG("Entered\n");

	pci_set_power_state(pci_dev, PCI_D3hot);

@@ -224,7 +226,7 @@ static void mhi_arch_link_off(struct mhi_controller *mhi_cntrl)
	msm_pcie_pm_control(MSM_PCIE_SUSPEND, mhi_cntrl->bus, pci_dev, NULL, 0);
	mhi_arch_set_bus_request(mhi_cntrl, 0);

	MHI_LOG("Exited\n");
	MHI_CNTRL_LOG("Exited\n");
}

static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)
@@ -235,7 +237,7 @@ static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)
	struct arch_info *arch_info = mhi_dev->arch_info;
	struct pci_dev *pci_dev = mhi_dev->pci_dev;

	MHI_LOG("Enter: mdm_crashed:%d\n", mdm_state);
	MHI_CNTRL_LOG("Enter: mdm_crashed:%d\n", mdm_state);

	/*
	 * Abort system suspend if system is preparing to go to suspend
@@ -251,7 +253,7 @@ static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)

	mutex_lock(&mhi_cntrl->pm_mutex);
	if (!mhi_dev->powered_on) {
		MHI_LOG("Not in active state\n");
		MHI_CNTRL_LOG("Not in active state\n");
		mutex_unlock(&mhi_cntrl->pm_mutex);
		pm_runtime_put_noidle(&pci_dev->dev);
		return;
@@ -261,7 +263,7 @@ static void mhi_arch_esoc_ops_power_off(void *priv, unsigned int flags)

	pm_runtime_put_noidle(&pci_dev->dev);

	MHI_LOG("Triggering shutdown process\n");
	MHI_CNTRL_LOG("Triggering shutdown process\n");
	mhi_power_down(mhi_cntrl, !mdm_state);

	/* turn the link off */
@@ -281,12 +283,10 @@ static void mhi_arch_esoc_ops_mdm_error(void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;

	MHI_LOG("Enter: mdm asserted\n");
	MHI_CNTRL_LOG("Enter: mdm asserted\n");

	/* transition MHI state into error state */
	mhi_control_error(mhi_cntrl);

	MHI_LOG("Exit\n");
}

static void mhi_bl_dl_cb(struct mhi_device *mhi_device,
@@ -386,8 +386,9 @@ static int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
	/* do a bus scale vote based on gen speeds */
	mhi_arch_set_bus_request(mhi_cntrl, link_info->target_link_speed);

	MHI_VERB("bw changed to speed:0x%x width:0x%x\n",
		 link_info->target_link_speed, link_info->target_link_width);
	MHI_LOG("BW changed to speed:0x%x width:0x%x\n",
		link_info->target_link_speed,
		link_info->target_link_width);

	return 0;
}
@@ -414,7 +415,7 @@ static int mhi_bl_probe(struct mhi_device *mhi_device,
		 mhi_device->slot);

	arch_info->boot_dev = mhi_device;
	arch_info->boot_ipc_log = ipc_log_context_create(MHI_IPC_LOG_PAGES,
	arch_info->boot_ipc_log = ipc_log_context_create(MHI_CNTRL_LOG_PAGES,
							 node_name, 0);
	ipc_log_string(arch_info->boot_ipc_log, HLOG
		       "Entered SBL, Session ID:0x%x\n", mhi_cntrl->session_id);
@@ -466,6 +467,12 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
							    node, 0);
		mhi_cntrl->log_lvl = mhi_ipc_log_lvl;

		snprintf(node, sizeof(node), "mhi_cntrl_%04x_%02u.%02u.%02u",
			 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
			 mhi_cntrl->slot);
		mhi_cntrl->cntrl_log_buf = ipc_log_context_create(
						MHI_CNTRL_LOG_PAGES, node, 0);

		snprintf(node, sizeof(node), "mhi_tsync_%04x_%02u.%02u.%02u",
			 mhi_cntrl->dev_id, mhi_cntrl->domain, mhi_cntrl->bus,
			 mhi_cntrl->slot);
@@ -496,7 +503,8 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
		reg_event->notify.data = mhi_cntrl;
		ret = msm_pcie_register_event(reg_event);
		if (ret)
			MHI_LOG("Failed to reg. for link up notification\n");
			MHI_CNTRL_ERR(
				"Failed to reg. for link up notification\n");

		init_completion(&arch_info->pm_completion);

@@ -513,7 +521,7 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
		arch_info->esoc_client = devm_register_esoc_client(
						&mhi_dev->pci_dev->dev, "mdm");
		if (IS_ERR_OR_NULL(arch_info->esoc_client)) {
			MHI_ERR("Failed to register esoc client\n");
			MHI_CNTRL_ERR("Failed to register esoc client\n");
		} else {
			/* register for power on/off hooks */
			struct esoc_client_hook *esoc_ops =
@@ -531,7 +539,7 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
			ret = esoc_register_client_hook(arch_info->esoc_client,
							esoc_ops);
			if (ret)
				MHI_ERR("Failed to register esoc ops\n");
				MHI_CNTRL_ERR("Failed to register esoc ops\n");
		}

		/*
@@ -587,7 +595,7 @@ static struct dma_iommu_mapping *mhi_arch_create_iommu_mapping(
		size = (mhi_dev->iova_stop - base) + 1;
	}

	MHI_LOG("Create iommu mapping of base:%pad size:%zu\n",
	MHI_CNTRL_LOG("Create iommu mapping of base:%pad size:%zu\n",
			&base, size);
	return arm_iommu_create_mapping(&pci_bus_type, base, size);
}
@@ -614,7 +622,7 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
		ret = iommu_domain_set_attr(mapping->domain,
					    DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
		if (ret) {
			MHI_ERR("Failed to set attribute S1_BYPASS\n");
			MHI_CNTRL_ERR("Failed to set attribute S1_BYPASS\n");
			goto release_mapping;
		}
	}
@@ -625,7 +633,7 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST,
					    &fast_map);
		if (ret) {
			MHI_ERR("Failed to set attribute FAST_MAP\n");
			MHI_CNTRL_ERR("Failed to set attribute FAST_MAP\n");
			goto release_mapping;
		}
	}
@@ -636,7 +644,7 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
		ret = iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_ATOMIC,
					    &atomic);
		if (ret) {
			MHI_ERR("Failed to set attribute ATOMIC\n");
			MHI_CNTRL_ERR("Failed to set attribute ATOMIC\n");
			goto release_mapping;
		}
	}
@@ -648,7 +656,8 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
					DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
					&force_coherent);
		if (ret) {
			MHI_ERR("Failed to set attribute FORCE_COHERENT\n");
			MHI_CNTRL_ERR(
				"Failed to set attribute FORCE_COHERENT\n");
			goto release_mapping;
		}
	}
@@ -657,7 +666,7 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)
		ret = arm_iommu_attach_device(&mhi_dev->pci_dev->dev, mapping);

		if (ret) {
			MHI_ERR("Error attach device, ret:%d\n", ret);
			MHI_CNTRL_ERR("Error attach device, ret:%d\n", ret);
			goto release_mapping;
		}
		arch_info->mapping = mapping;
@@ -667,7 +676,7 @@ int mhi_arch_iommu_init(struct mhi_controller *mhi_cntrl)

	ret = dma_set_mask_and_coherent(mhi_cntrl->dev, DMA_BIT_MASK(64));
	if (ret) {
		MHI_ERR("Error setting dma mask, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error setting dma mask, ret:%d\n", ret);
		goto release_device;
	}

@@ -703,7 +712,8 @@ int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
	struct pci_dev *pci_dev = mhi_dev->pci_dev;
	int ret = 0;

	MHI_LOG("Entered\n");
	MHI_LOG("Entered with suspend_mode:%s\n",
		TO_MHI_SUSPEND_MODE_STR(mhi_dev->suspend_mode));

	/* disable inactivity timer */
	if (!mhi_dev->allow_m1)
@@ -714,7 +724,8 @@ int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
		pci_clear_master(pci_dev);
		ret = pci_save_state(mhi_dev->pci_dev);
		if (ret) {
			MHI_ERR("Failed with pci_save_state, ret:%d\n", ret);
			MHI_CNTRL_ERR("Failed with pci_save_state, ret:%d\n",
				      ret);
			goto exit_suspend;
		}

@@ -731,6 +742,7 @@ int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
	case MHI_FAST_LINK_OFF:
	case MHI_ACTIVE_STATE:
	case MHI_FAST_LINK_ON:/* keeping link on do nothing */
	default:
		break;
	}

@@ -751,8 +763,6 @@ static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
	struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
	int ret;

	MHI_LOG("Entered\n");

	/* request bus scale voting based on higher gen speed */
	ret = mhi_arch_set_bus_request(mhi_cntrl,
				       cur_info->target_link_speed);
@@ -794,7 +804,8 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
	struct pci_dev *pci_dev = mhi_dev->pci_dev;
	int ret = 0;

	MHI_LOG("Entered\n");
	MHI_LOG("Entered with suspend_mode:%s\n",
		TO_MHI_SUSPEND_MODE_STR(mhi_dev->suspend_mode));

	switch (mhi_dev->suspend_mode) {
	case MHI_DEFAULT_SUSPEND:
@@ -803,6 +814,7 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
	case MHI_FAST_LINK_OFF:
	case MHI_ACTIVE_STATE:
	case MHI_FAST_LINK_ON:
	default:
		break;
	}

@@ -814,9 +826,9 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
	if (!mhi_dev->allow_m1)
		msm_pcie_l1ss_timeout_enable(pci_dev);

	MHI_LOG("Exited\n");
	MHI_LOG("Exited with ret:%d\n", ret);

	return 0;
	return ret;
}

int mhi_arch_link_lpm_disable(struct mhi_controller *mhi_cntrl)
+27 −15
Original line number Diff line number Diff line
@@ -44,12 +44,19 @@ static const struct firmware_info firmware_table[] = {
static int debug_mode;
module_param_named(debug_mode, debug_mode, int, 0644);

const char * const mhi_suspend_mode_str[MHI_SUSPEND_MODE_MAX] = {
	[MHI_ACTIVE_STATE] = "Active",
	[MHI_DEFAULT_SUSPEND] = "Default",
	[MHI_FAST_LINK_OFF] = "Fast Link Off",
	[MHI_FAST_LINK_ON] = "Fast Link On",
};

int mhi_debugfs_trigger_m0(void *data, u64 val)
{
	struct mhi_controller *mhi_cntrl = data;
	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);

	MHI_LOG("Trigger M3 Exit\n");
	MHI_CNTRL_LOG("Trigger M3 Exit\n");
	pm_runtime_get(&mhi_dev->pci_dev->dev);
	pm_runtime_put(&mhi_dev->pci_dev->dev);

@@ -63,7 +70,7 @@ int mhi_debugfs_trigger_m3(void *data, u64 val)
	struct mhi_controller *mhi_cntrl = data;
	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);

	MHI_LOG("Trigger M3 Entry\n");
	MHI_CNTRL_LOG("Trigger M3 Entry\n");
	pm_runtime_mark_last_busy(&mhi_dev->pci_dev->dev);
	pm_request_autosuspend(&mhi_dev->pci_dev->dev);

@@ -102,19 +109,19 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
	mhi_dev->resn = MHI_PCI_BAR_NUM;
	ret = pci_assign_resource(pci_dev, mhi_dev->resn);
	if (ret) {
		MHI_ERR("Error assign pci resources, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error assign pci resources, ret:%d\n", ret);
		return ret;
	}

	ret = pci_enable_device(pci_dev);
	if (ret) {
		MHI_ERR("Error enabling device, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error enabling device, ret:%d\n", ret);
		goto error_enable_device;
	}

	ret = pci_request_region(pci_dev, mhi_dev->resn, "mhi");
	if (ret) {
		MHI_ERR("Error pci_request_region, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error pci_request_region, ret:%d\n", ret);
		goto error_request_region;
	}

@@ -124,14 +131,14 @@ static int mhi_init_pci_dev(struct mhi_controller *mhi_cntrl)
	len = pci_resource_len(pci_dev, mhi_dev->resn);
	mhi_cntrl->regs = ioremap_nocache(mhi_cntrl->base_addr, len);
	if (!mhi_cntrl->regs) {
		MHI_ERR("Error ioremap region\n");
		MHI_CNTRL_ERR("Error ioremap region\n");
		goto error_ioremap;
	}

	ret = pci_alloc_irq_vectors(pci_dev, mhi_cntrl->msi_required,
				    mhi_cntrl->msi_required, PCI_IRQ_MSI);
	if (IS_ERR_VALUE((ulong)ret) || ret < mhi_cntrl->msi_required) {
		MHI_ERR("Failed to enable MSI, ret:%d\n", ret);
		MHI_CNTRL_ERR("Failed to enable MSI, ret:%d\n", ret);
		goto error_req_msi;
	}

@@ -381,7 +388,7 @@ static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
	struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
	int itr = DIV_ROUND_UP(mhi_cntrl->timeout_ms, delayms);

	MHI_LOG("Entered\n");
	MHI_CNTRL_LOG("Entered\n");

	mutex_lock(&mhi_cntrl->pm_mutex);

@@ -397,19 +404,19 @@ static int mhi_force_suspend(struct mhi_controller *mhi_cntrl)
		if (!ret || ret != -EBUSY)
			break;

		MHI_LOG("MHI busy, sleeping and retry\n");
		MHI_CNTRL_LOG("MHI busy, sleeping and retry\n");
		msleep(delayms);
	}

	if (ret)
	if (ret) {
		MHI_CNTRL_ERR("Force suspend ret:%d\n", ret);
		goto exit_force_suspend;
	}

	mhi_dev->suspend_mode = MHI_DEFAULT_SUSPEND;
	ret = mhi_arch_link_suspend(mhi_cntrl);

exit_force_suspend:
	MHI_LOG("Force suspend ret with %d\n", ret);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
@@ -541,13 +548,15 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
		 */
		pm_runtime_get(dev);
		ret = mhi_force_suspend(mhi_cntrl);
		if (!ret)
		if (!ret) {
			MHI_CNTRL_LOG("Attempt resume after forced suspend\n");
			mhi_runtime_resume(dev);
		}
		pm_runtime_put(dev);
		mhi_arch_mission_mode_enter(mhi_cntrl);
		break;
	default:
		MHI_ERR("Unhandled cb:0x%x\n", reason);
		MHI_CNTRL_LOG("Unhandled cb:0x%x\n", reason);
	}
}

@@ -773,6 +782,9 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
	atomic_set(&mhi_cntrl->write_idx, -1);

skip_offload:
	if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_qcom_group))
		MHI_CNTRL_ERR("Error while creating the sysfs group\n");

	return mhi_cntrl;

error_free_wq:
@@ -826,7 +838,7 @@ int mhi_pci_probe(struct pci_dev *pci_dev,

	pm_runtime_mark_last_busy(&pci_dev->dev);

	MHI_LOG("Return successful\n");
	MHI_CNTRL_LOG("Return successful\n");

	return 0;

+4 −1
Original line number Diff line number Diff line
@@ -48,9 +48,12 @@ enum mhi_suspend_mode {
	MHI_DEFAULT_SUSPEND,
	MHI_FAST_LINK_OFF,
	MHI_FAST_LINK_ON,
	MHI_SUSPEND_MODE_MAX,
};

#define MHI_IS_SUSPENDED(mode) (mode)
extern const char * const mhi_suspend_mode_str[MHI_SUSPEND_MODE_MAX];
#define TO_MHI_SUSPEND_MODE_STR(mode) \
	(mode >= MHI_SUSPEND_MODE_MAX ? "Invalid" : mhi_suspend_mode_str[mode])

struct mhi_dev {
	struct pci_dev *pci_dev;
+53 −52
Original line number Diff line number Diff line
@@ -60,7 +60,7 @@ static void mhi_process_sfr(struct mhi_controller *mhi_cntrl,
		rem_seg_len = 0;
		seg_idx++;
		if (seg_idx == mhi_cntrl->rddm_image->entries) {
			MHI_ERR("invalid size for SFR file\n");
			MHI_CNTRL_ERR("invalid size for SFR file\n");
			goto err;
		}
	}
@@ -89,7 +89,7 @@ static int mhi_find_next_file_offset(struct mhi_controller *mhi_cntrl,
	while (info->file_size) {
		info->seg_idx++;
		if (info->seg_idx == mhi_cntrl->rddm_image->entries) {
			MHI_ERR("invalid size for file %s\n",
			MHI_CNTRL_ERR("invalid size for file %s\n",
					table_info->file_name);
			return -EINVAL;
		}
@@ -118,14 +118,14 @@ void mhi_dump_sfr(struct mhi_controller *mhi_cntrl)

	if (rddm_header->header_size > sizeof(*rddm_header) ||
			rddm_header->header_size < 8) {
		MHI_ERR("invalid reported header size %u\n",
		MHI_CNTRL_ERR("invalid reported header size %u\n",
				rddm_header->header_size);
		return;
	}

	table_size = (rddm_header->header_size - 8) / sizeof(*table_info);
	if (!table_size) {
		MHI_ERR("invalid rddm table size %u\n", table_size);
		MHI_CNTRL_ERR("invalid rddm table size %u\n", table_size);
		return;
	}

@@ -157,13 +157,13 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
	int i = 0;

	for (i = 0; i < img_info->entries - 1; i++, mhi_buf++, bhi_vec++) {
		MHI_VERB("Setting vector:%pad size:%zu\n",
		MHI_CNTRL_LOG("Setting vector:%pad size:%zu\n",
				&mhi_buf->dma_addr, mhi_buf->len);
		bhi_vec->dma_addr = mhi_buf->dma_addr;
		bhi_vec->size = mhi_buf->len;
	}

	MHI_LOG("BHIe programming for RDDM\n");
	MHI_CNTRL_LOG("BHIe programming for RDDM\n");

	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));
@@ -182,7 +182,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
			    BHIE_RXVECDB_SEQNUM_BMSK, BHIE_RXVECDB_SEQNUM_SHFT,
			    sequence_id);

	MHI_LOG("address:%pad len:0x%lx sequence:%u\n",
	MHI_CNTRL_LOG("address:%pad len:0x%lx sequence:%u\n",
			&mhi_buf->dma_addr, mhi_buf->len, sequence_id);
}

@@ -198,7 +198,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
	int rddm_retry = rddm_timeout_us / delayus; /* time to enter rddm */
	void __iomem *base = mhi_cntrl->bhie;

	MHI_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
	MHI_CNTRL_LOG("Entered with pm_state:%s dev_state:%s ee:%s\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state),
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			TO_MHI_EXEC_STR(mhi_cntrl->ee));
@@ -226,10 +226,10 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
	ee = mhi_get_exec_env(mhi_cntrl);
	if (ee != MHI_EE_RDDM) {

		MHI_LOG("Trigger device into RDDM mode using SYSERR\n");
		MHI_CNTRL_LOG("Trigger device into RDDM mode using SYSERR\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

		MHI_LOG("Waiting for device to enter RDDM\n");
		MHI_CNTRL_LOG("Waiting for device to enter RDDM\n");
		while (rddm_retry--) {
			ee = mhi_get_exec_env(mhi_cntrl);
			if (ee == MHI_EE_RDDM)
@@ -240,7 +240,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)

		if (rddm_retry <= 0) {
			/* Hardware reset; force device to enter rddm */
			MHI_LOG(
			MHI_CNTRL_LOG(
				"Did not enter RDDM, do a host req. reset\n");
			mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs,
				      MHI_SOC_RESET_REQ_OFFSET,
@@ -251,7 +251,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
		ee = mhi_get_exec_env(mhi_cntrl);
	}

	MHI_LOG("Waiting for image download completion, current EE:%s\n",
	MHI_CNTRL_LOG("Waiting for image download completion, current EE:%s\n",
			TO_MHI_EXEC_STR(ee));
	while (retry--) {
		ret = mhi_read_reg_field(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS,
@@ -262,7 +262,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
			return -EIO;

		if (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) {
			MHI_LOG("RDDM successfully collected\n");
			MHI_CNTRL_LOG("RDDM successfully collected\n");
			return 0;
		}

@@ -272,9 +272,9 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
	ee = mhi_get_exec_env(mhi_cntrl);
	ret = mhi_read_reg(mhi_cntrl, base, BHIE_RXVECSTATUS_OFFS, &rx_status);

	MHI_ERR("Did not complete RDDM transfer\n");
	MHI_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
	MHI_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);
	MHI_CNTRL_ERR("Did not complete RDDM transfer\n");
	MHI_CNTRL_ERR("Current EE:%s\n", TO_MHI_EXEC_STR(ee));
	MHI_CNTRL_ERR("RXVEC_STATUS:0x%x, ret:%d\n", rx_status, ret);

	return -EIO;
}
@@ -288,7 +288,7 @@ int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic)
	if (in_panic)
		return __mhi_download_rddm_in_panic(mhi_cntrl);

	MHI_LOG("Waiting for image download completion\n");
	MHI_CNTRL_LOG("Waiting for image download completion\n");

	/* waiting for image download completion */
	wait_event_timeout(mhi_cntrl->state_event,
@@ -316,7 +316,7 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
		return -EIO;
	}

	MHI_LOG("Starting BHIe Programming\n");
	MHI_CNTRL_LOG("Starting BHIe Programming\n");

	mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
		      upper_32_bits(mhi_buf->dma_addr));
@@ -336,11 +336,11 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
			    mhi_cntrl->sequence_id);
	read_unlock_bh(pm_lock);

	MHI_LOG("Upper:0x%x Lower:0x%x len:0x%lx sequence:%u\n",
	MHI_CNTRL_LOG("Upper:0x%x Lower:0x%x len:0x%lx sequence:%u\n",
			upper_32_bits(mhi_buf->dma_addr),
			lower_32_bits(mhi_buf->dma_addr),
			mhi_buf->len, mhi_cntrl->sequence_id);
	MHI_LOG("Waiting for image transfer completion\n");
	MHI_CNTRL_LOG("Waiting for image transfer completion\n");

	/* waiting for image download completion */
	wait_event_timeout(mhi_cntrl->state_event,
@@ -377,7 +377,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
		{ NULL },
	};

	MHI_LOG("Starting BHI programming\n");
	MHI_CNTRL_LOG("Starting BHI programming\n");

	/* program start sbl download via  bhi protocol */
	read_lock_bh(pm_lock);
@@ -400,7 +400,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
			mhi_cntrl->session_id);
	read_unlock_bh(pm_lock);

	MHI_LOG("Waiting for image transfer completion\n");
	MHI_CNTRL_LOG("Waiting for image transfer completion\n");

	/* waiting for image download completion */
	wait_event_timeout(mhi_cntrl->state_event,
@@ -413,7 +413,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
		goto invalid_pm_state;

	if (tx_status == BHI_STATUS_ERROR) {
		MHI_ERR("Image transfer failed\n");
		MHI_CNTRL_ERR("Image transfer failed\n");
		read_lock_bh(pm_lock);
		if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
			for (i = 0; error_reg[i].name; i++) {
@@ -421,7 +421,7 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
						   error_reg[i].offset, &val);
				if (ret)
					break;
				MHI_ERR("reg:%s value:0x%x\n",
				MHI_CNTRL_ERR("reg:%s value:0x%x\n",
					      error_reg[i].name, val);
			}
		}
@@ -461,7 +461,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
	struct image_info *img_info;
	struct mhi_buf *mhi_buf;

	MHI_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
	MHI_CNTRL_LOG("Allocating bytes:%zu seg_size:%zu total_seg:%u\n",
			alloc_size, seg_size, segments);

	img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
@@ -489,7 +489,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
		if (!mhi_buf->buf)
			goto error_alloc_segment;

		MHI_LOG("Entry:%d Address:0x%llx size:%lu\n", i,
		MHI_CNTRL_LOG("Entry:%d Address:0x%llx size:%lu\n", i,
			mhi_buf->dma_addr, mhi_buf->len);
	}

@@ -497,7 +497,7 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
	img_info->entries = segments;
	*image_info = img_info;

	MHI_LOG("Successfully allocated bhi vec table\n");
	MHI_CNTRL_LOG("Successfully allocated bhi vec table\n");

	return 0;

@@ -552,11 +552,11 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
	size_t size;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR("MHI is not in valid state\n");
		MHI_CNTRL_ERR("MHI is not in valid state\n");
		return;
	}

	MHI_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));
	MHI_CNTRL_LOG("Device current EE:%s\n", TO_MHI_EXEC_STR(mhi_cntrl->ee));

	/* if device in pthru, do reset to ready state transition */
	if (mhi_cntrl->ee == MHI_EE_PTHRU)
@@ -567,13 +567,14 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)

	if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
						     !mhi_cntrl->seg_len))) {
		MHI_ERR("No firmware image defined or !sbl_size || !seg_len\n");
		MHI_CNTRL_ERR(
			"No firmware image defined or !sbl_size || !seg_len\n");
		return;
	}

	ret = request_firmware(&firmware, fw_name, mhi_cntrl->dev);
	if (ret) {
		MHI_ERR("Error loading firmware, ret:%d\n", ret);
		MHI_CNTRL_ERR("Error loading firmware, ret:%d\n", ret);
		return;
	}

@@ -585,7 +586,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)

	buf = mhi_alloc_coherent(mhi_cntrl, size, &dma_addr, GFP_KERNEL);
	if (!buf) {
		MHI_ERR("Could not allocate memory for image\n");
		MHI_CNTRL_ERR("Could not allocate memory for image\n");
		release_firmware(firmware);
		return;
	}
@@ -614,11 +615,11 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
		ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
					   firmware->size);
		if (ret) {
			MHI_ERR("Error alloc size of %zu\n", firmware->size);
			MHI_CNTRL_ERR("Error alloc size:%zu\n", firmware->size);
			goto error_alloc_fw_table;
		}

		MHI_LOG("Copying firmware image into vector table\n");
		MHI_CNTRL_LOG("Copying firmware image into vector table\n");

		/* load the firmware into BHIE vec table */
		mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
@@ -628,7 +629,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
	/* transitioning into MHI RESET->READY state */
	ret = mhi_ready_state_transition(mhi_cntrl);

	MHI_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s, ret:%d\n",
	MHI_CNTRL_LOG("To Reset->Ready PM_STATE:%s MHI_STATE:%s EE:%s ret:%d\n",
			to_mhi_pm_state_str(mhi_cntrl->pm_state),
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			TO_MHI_EXEC_STR(mhi_cntrl->ee), ret);
@@ -637,7 +638,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
		return;

	if (ret) {
		MHI_ERR("Did not transition to READY state\n");
		MHI_CNTRL_ERR("Did not transition to READY state\n");
		goto error_read;
	}

@@ -648,7 +649,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		MHI_ERR("MHI did not enter BHIE\n");
		MHI_CNTRL_ERR("MHI did not enter BHIE\n");
		goto error_read;
	}

@@ -658,7 +659,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
			       /* last entry is vec table */
			       &image_info->mhi_buf[image_info->entries - 1]);

	MHI_LOG("amss fw_load, ret:%d\n", ret);
	MHI_CNTRL_LOG("amss fw_load ret:%d\n", ret);

	release_firmware(firmware);

+15 −15
Original line number Diff line number Diff line
@@ -425,7 +425,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
				  mhi_msi_handlr, IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			MHI_ERR("Error requesting irq:%d for ev:%d\n",
			MHI_CNTRL_ERR("Error requesting irq:%d for ev:%d\n",
					mhi_cntrl->irq[mhi_event->msi], i);
			goto error_request;
		}
@@ -748,7 +748,7 @@ static int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
	ret = mhi_get_capability_offset(mhi_cntrl, TIMESYNC_CAP_ID,
					&time_offset);
	if (ret) {
		MHI_LOG("No timesync capability found\n");
		MHI_CNTRL_LOG("No timesync capability found\n");
		return ret;
	}

@@ -761,7 +761,7 @@ static int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
	INIT_LIST_HEAD(&mhi_tsync->head);

	/* save time_offset for obtaining time */
	MHI_LOG("TIME OFFS:0x%x\n", time_offset);
	MHI_CNTRL_LOG("TIME OFFS:0x%x\n", time_offset);
	mhi_tsync->time_reg = mhi_cntrl->regs + time_offset
			      + TIMESYNC_TIME_LOW_OFFSET;

@@ -770,7 +770,7 @@ static int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
	/* get timesync event ring configuration */
	er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
	if (er_index < 0) {
		MHI_LOG("Could not find timesync event ring\n");
		MHI_CNTRL_LOG("Could not find timesync event ring\n");
		return er_index;
	}

@@ -799,7 +799,7 @@ int mhi_init_sfr(struct mhi_controller *mhi_cntrl)
	sfr_info->buf_addr = mhi_alloc_coherent(mhi_cntrl, sfr_info->len,
					&sfr_info->dma_addr, GFP_KERNEL);
	if (!sfr_info->buf_addr) {
		MHI_ERR("Failed to allocate memory for sfr\n");
		MHI_CNTRL_ERR("Failed to allocate memory for sfr\n");
		return -ENOMEM;
	}

@@ -807,14 +807,14 @@ int mhi_init_sfr(struct mhi_controller *mhi_cntrl)

	ret = mhi_send_cmd(mhi_cntrl, NULL, MHI_CMD_SFR_CFG);
	if (ret) {
		MHI_ERR("Failed to send sfr cfg cmd\n");
		MHI_CNTRL_ERR("Failed to send sfr cfg cmd\n");
		return ret;
	}

	ret = wait_for_completion_timeout(&sfr_info->completion,
			msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || sfr_info->ccs != MHI_EV_CC_SUCCESS) {
		MHI_ERR("Failed to get sfr cfg cmd completion\n");
		MHI_CNTRL_ERR("Failed to get sfr cfg cmd completion\n");
		return -EIO;
	}

@@ -842,7 +842,7 @@ static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)

	bw_cfg_offset += BW_SCALE_CFG_OFFSET;

	MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
	MHI_CNTRL_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);

	/* advertise host support */
	mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
@@ -931,7 +931,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
		{ 0, 0, 0 }
	};

	MHI_LOG("Initializing MMIO\n");
	MHI_CNTRL_LOG("Initializing MMIO\n");

	/* set up DB register for all the chan rings */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
@@ -939,7 +939,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
	if (ret)
		return -EIO;

	MHI_LOG("CHDBOFF:0x%x\n", val);
	MHI_CNTRL_LOG("CHDBOFF:0x%x\n", val);

	/* setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
@@ -962,7 +962,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
	if (ret)
		return -EIO;

	MHI_LOG("ERDBOFF:0x%x\n", val);
	MHI_CNTRL_LOG("ERDBOFF:0x%x\n", val);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
@@ -975,7 +975,7 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
	/* set up DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	MHI_LOG("Programming all MMIO values.\n");
	MHI_CNTRL_LOG("Programming all MMIO values.\n");
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
@@ -1710,7 +1710,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret) {
		MHI_ERR("Error with init dev_ctxt\n");
		MHI_CNTRL_ERR("Error with init dev_ctxt\n");
		goto error_dev_ctxt;
	}

@@ -1730,7 +1730,7 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
			ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
					   &bhie_off);
			if (ret) {
				MHI_ERR("Error getting bhie offset\n");
				MHI_CNTRL_ERR("Error getting bhie offset\n");
				goto bhie_error;
			}

Loading