Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27db3b22 authored by Ravikishore Pampana's avatar Ravikishore Pampana
Browse files

msm: camera: tfe: Add pid and mid support for tfe



Process TFE page faults based on the pid and mid values.
When a page fault happens, the kernel API provides the pid and
mid values; from these, the TFE HW and TFE port that caused the
page fault can be found. Added support for this in the TFE driver.
Based on the pid and mid values, get the HW id and port ids, go
through all contexts that use this HW id and port id, and log the
data. Once the context is identified, log the acquire data and
last consumed client address details. Dump the HW register data
into the given buffer. Send the SMMU page fault event to user
space through the v4l2 queue.

CRs-Fixed: 2775154
Change-Id: Iafaa7d1283ea0b836c223ed1fe6c419b0ac50c15
Signed-off-by: default avatarRavikishore Pampana <rpampana@codeaurora.org>
parent a2fdf71a
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -6689,7 +6689,6 @@ static void cam_ife_mgr_dump_pf_data(
	struct cam_hw_cmd_args *hw_cmd_args)
{
	struct cam_ife_hw_mgr_ctx *ctx;

	struct cam_isp_hw_mgr_res          *hw_mgr_res;
	struct cam_isp_hw_get_cmd_update    cmd_update;
	struct cam_isp_hw_get_res_for_mid   get_res;
+238 −69
Original line number Diff line number Diff line
@@ -1105,7 +1105,7 @@ static int cam_tfe_hw_mgr_acquire_res_tfe_in(
				continue;

			hw_intf = tfe_hw_mgr->tfe_devices[
				csid_res->hw_res[i]->hw_intf->hw_idx];
				csid_res->hw_res[i]->hw_intf->hw_idx]->hw_intf;

			/* fill in more acquire information as needed */
			/* slave Camif resource, */
@@ -2782,10 +2782,10 @@ static int cam_tfe_mgr_reset_tfe_hw(struct cam_tfe_hw_mgr *hw_mgr,
		if (!hw_mgr->tfe_devices[i])
			continue;

		if (hw_idx != hw_mgr->tfe_devices[i]->hw_idx)
		if (hw_idx != hw_mgr->tfe_devices[i]->hw_intf->hw_idx)
			continue;
		CAM_DBG(CAM_ISP, "TFE (id = %d) reset", hw_idx);
		tfe_hw_intf = hw_mgr->tfe_devices[i];
		tfe_hw_intf = hw_mgr->tfe_devices[i]->hw_intf;
		tfe_hw_intf->hw_ops.reset(tfe_hw_intf->hw_priv,
			&tfe_reset_type, sizeof(tfe_reset_type));
		break;
@@ -3089,7 +3089,7 @@ static int cam_tfe_mgr_user_dump_hw(
	rc = cam_tfe_mgr_handle_reg_dump(tfe_ctx,
		tfe_ctx->reg_dump_buf_desc,
		tfe_ctx->num_reg_dump_buf,
		CAM_ISP_PACKET_META_REG_DUMP_ON_ERROR,
		CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR,
		&soc_dump_args,
		true);
	if (rc) {
@@ -4294,41 +4294,42 @@ static int cam_tfe_mgr_sof_irq_debug(
	return rc;
}

static void cam_tfe_mgr_print_io_bufs(struct cam_packet *packet,
	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
	bool *mem_found)
static void cam_tfe_mgr_print_io_bufs(struct cam_tfe_hw_mgr  *hw_mgr,
		uint32_t res_id, struct cam_packet *packet,
		bool    *ctx_found, struct cam_tfe_hw_mgr_ctx *ctx)
{
	dma_addr_t  iova_addr;
	size_t      src_buf_size;
	int         i, j;
	int         rc = 0;
	int32_t     mmu_hdl;

	struct cam_buf_io_cfg  *io_cfg = NULL;
	int32_t      mmu_hdl, iommu_hdl, sec_mmu_hdl;
	dma_addr_t   iova_addr;
	size_t        src_buf_size;
	int  i, j, rc = 0;

	if (mem_found)
		*mem_found = false;
	iommu_hdl = hw_mgr->mgr_common.img_iommu_hdl;
	sec_mmu_hdl = hw_mgr->mgr_common.img_iommu_hdl_secure;

	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
		packet->io_configs_offset / 4);

	for (i = 0; i < packet->num_io_configs; i++) {
		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
			if (!io_cfg[i].mem_handle[j])
		if (io_cfg[i].resource_type != res_id)
			continue;
		else
			break;
	}

			if (pf_buf_info &&
				GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
				GET_FD_FROM_HANDLE(pf_buf_info)) {
				CAM_INFO(CAM_ISP,
					"Found PF at port: 0x%x mem 0x%x fd: 0x%x",
					io_cfg[i].resource_type,
					io_cfg[i].mem_handle[j],
					pf_buf_info);
				if (mem_found)
					*mem_found = true;
	if (i == packet->num_io_configs) {
		CAM_ERR(CAM_ISP,
			"getting io port for mid resource id failed ctx id:%d req id:%lld res id:0x%x",
			ctx->ctx_index, packet->header.request_id,
			res_id);
		return;
	}

	for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
		if (!io_cfg[i].mem_handle[j])
			break;

		CAM_INFO(CAM_ISP, "port: 0x%x f: %u format: %d dir %d",
			io_cfg[i].resource_type,
			io_cfg[i].fence,
@@ -4365,6 +4366,178 @@ static void cam_tfe_mgr_print_io_bufs(struct cam_packet *packet,
			io_cfg[i].mem_handle[j]);
	}
}

/*
 * cam_tfe_mgr_pf_dump() - Log debug data for a page-faulted out resource.
 * @res_id: Faulting output resource id; the low byte indexes
 *          ctx->res_list_tfe_out.
 * @ctx:    TFE hw mgr context that owns the faulted resource.
 *
 * Dumps the error registers via the context's reg dump descriptors, logs
 * the acquire data of every acquired CSID resource, then asks the bus
 * client backing @res_id to dump its info.
 */
static void cam_tfe_mgr_pf_dump(uint32_t res_id,
	struct cam_tfe_hw_mgr_ctx *ctx)
{
	struct cam_isp_hw_mgr_res        *hw_mgr_res;
	struct cam_hw_intf               *hw_intf;
	struct cam_isp_hw_get_cmd_update  cmd_update;
	uint32_t                          res_id_out;
	int  i, rc = 0;

	/* dump the registers  */
	rc = cam_tfe_mgr_handle_reg_dump(ctx, ctx->reg_dump_buf_desc,
		ctx->num_reg_dump_buf,
		CAM_ISP_TFE_PACKET_META_REG_DUMP_ON_ERROR, NULL, false);
	if (rc) {
		CAM_ERR(CAM_ISP,
			"Reg dump on pf failed req id: %llu rc: %d",
			ctx->applied_req_id, rc);
	}

	/* dump the acquire data for every acquired CSID resource */
	list_for_each_entry(hw_mgr_res, &ctx->res_list_tfe_csid, list) {
		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
			if (!hw_mgr_res->hw_res[i])
				continue;

			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
			if (hw_intf && hw_intf->hw_ops.process_cmd) {
				rc = hw_intf->hw_ops.process_cmd(
					hw_intf->hw_priv,
					CAM_TFE_CSID_LOG_ACQUIRE_DATA,
					hw_mgr_res->hw_res[i],
					sizeof(void *));
				if (rc)
					CAM_ERR(CAM_ISP,
						"acquire dump data failed");
			} else
				CAM_ERR(CAM_ISP, "NULL hw_intf!");
		}
	}

	/* low byte of the resource id indexes the out resource table */
	res_id_out = res_id & 0xFF;

	if (res_id_out >= CAM_TFE_HW_OUT_RES_MAX) {
		CAM_ERR(CAM_ISP, "Invalid out resource id :%x",
			res_id);
		return;
	}

	/* ask the bus client of the faulted port to dump its info */
	hw_mgr_res =
		&ctx->res_list_tfe_out[res_id_out];
	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
		if (!hw_mgr_res->hw_res[i])
			continue;

		cmd_update.cmd_type = CAM_ISP_HW_CMD_DUMP_BUS_INFO;
		cmd_update.res = hw_mgr_res->hw_res[i];
		hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
		/* NULL-check hw_intf as well, matching the CSID loop above */
		if (hw_intf && hw_intf->hw_ops.process_cmd) {
			rc = hw_intf->hw_ops.process_cmd(
				hw_intf->hw_priv,
				CAM_ISP_HW_CMD_DUMP_BUS_INFO,
				(void *)&cmd_update,
				sizeof(struct cam_isp_hw_get_cmd_update));
		}
	}
}

/*
 * cam_tfe_mgr_dump_pf_data() - Resolve an SMMU page fault's pid/mid to a
 * TFE HW index and output resource, then log debug data for it.
 * @hw_mgr:      TFE hw manager (used for the IO-buffer print at the end).
 * @hw_cmd_args: Carries the pf_args (pid, mid, packet, ctx_found,
 *               resource_type) and the context handle in ctxt_to_hw_map.
 *
 * Writes the resolved out resource id back through
 * hw_cmd_args->u.pf_args.resource_type and sets *ctx_found when this
 * context owns the faulting HW. Returns early (without the IO-buffer
 * log) when the pid does not match any TFE or the context is not
 * involved in the fault.
 */
static void cam_tfe_mgr_dump_pf_data(
	struct cam_tfe_hw_mgr  *hw_mgr,
	struct cam_hw_cmd_args *hw_cmd_args)
{
	struct cam_tfe_hw_mgr_ctx           *ctx;
	struct cam_isp_hw_mgr_res           *hw_mgr_res;
	struct cam_isp_hw_get_cmd_update     cmd_update;
	struct cam_isp_hw_get_res_for_mid    get_res;
	struct cam_packet                   *packet;
	uint32_t  *resource_type;
	uint32_t   hw_id;
	bool      *ctx_found, hw_id_found = false;
	int        i, j, rc = 0;

	ctx = (struct cam_tfe_hw_mgr_ctx *)hw_cmd_args->ctxt_to_hw_map;

	packet  = hw_cmd_args->u.pf_args.pf_data.packet;
	ctx_found = hw_cmd_args->u.pf_args.ctx_found;
	resource_type = hw_cmd_args->u.pf_args.resource_type;

	/*
	 * Fault already resolved by another context: skip the pid/mid
	 * lookup and only print the IO buffers for this packet.
	 */
	if ((*ctx_found) && (*resource_type))
		goto outportlog;

	/* Match the faulting pid against every TFE HW's pid list */
	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
		if (!g_tfe_hw_mgr.tfe_devices[i])
			continue;

		for (j = 0; j < g_tfe_hw_mgr.tfe_devices[i]->num_hw_pid; j++) {
			if (g_tfe_hw_mgr.tfe_devices[i]->hw_pid[j] ==
				hw_cmd_args->u.pf_args.pid) {
				hw_id_found = true;
				hw_id = i;
				break;
			}
		}
		if (hw_id_found)
			break;
	}

	/* Loop ran to completion only when no TFE pid matched */
	if (i == CAM_TFE_HW_NUM_MAX) {
		CAM_INFO(CAM_ISP,
			"PID:%d  is not matching with any TFE HW PIDs ctx id:%d",
			hw_cmd_args->u.pf_args.pid,  ctx->ctx_index);
		return;
	}

	/* Does this context actually use the faulting TFE HW? */
	for (i = 0; i < ctx->num_base; i++) {
		if (ctx->base[i].idx == hw_id) {
			*ctx_found = true;
			break;
		}
	}

	if (!(*ctx_found)) {
		CAM_INFO(CAM_ISP,
			"This context does not cause pf:pid:%d hw id:%d ctx_id:%d",
			hw_cmd_args->u.pf_args.pid, hw_id, ctx->ctx_index);
		return;
	}

	/*
	 * Pick any valid out resource; it is only used as a channel to
	 * issue the GET_RES_FOR_MID command to the hw layer.
	 */
	for (i = 0; i < CAM_TFE_HW_OUT_RES_MAX; i++) {
		hw_mgr_res = &ctx->res_list_tfe_out[i];
		if (!hw_mgr_res->hw_res[0])
			continue;

		break;
	}

	if (i >= CAM_TFE_HW_OUT_RES_MAX) {
		CAM_ERR(CAM_ISP,
			"NO valid outport resources ctx id:%d req id:%lld",
			ctx->ctx_index, packet->header.request_id);
		return;
	}

	get_res.mid = hw_cmd_args->u.pf_args.mid;
	cmd_update.res = hw_mgr_res->hw_res[0];
	cmd_update.cmd_type = CAM_ISP_HW_CMD_GET_RES_FOR_MID;
	cmd_update.data = (void *) &get_res;

	/* get resource id for given mid */
	rc = hw_mgr_res->hw_res[0]->hw_intf->hw_ops.process_cmd(
		hw_mgr_res->hw_res[0]->hw_intf->hw_priv,
		cmd_update.cmd_type, &cmd_update,
		sizeof(struct cam_isp_hw_get_cmd_update));

	if (rc) {
		CAM_ERR(CAM_ISP,
			"getting mid port resource id failed ctx id:%d req id:%lld",
			ctx->ctx_index, packet->header.request_id);
		return;
	}
	CAM_ERR(CAM_ISP,
		"Page fault on resource id:0x%x ctx id:%d req id:%lld",
		get_res.out_res_id, ctx->ctx_index, packet->header.request_id);
	/* report the resolved out resource back to the caller */
	*resource_type = get_res.out_res_id;

	/* dump registers, acquire data and bus info for the faulted port */
	cam_tfe_mgr_pf_dump(get_res.out_res_id, ctx);

outportlog:
	cam_tfe_mgr_print_io_bufs(hw_mgr, *resource_type, packet,
		ctx_found, ctx);


}

static void cam_tfe_mgr_ctx_irq_dump(struct cam_tfe_hw_mgr_ctx *ctx)
@@ -4463,12 +4636,7 @@ static int cam_tfe_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
		}
		break;
	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
		cam_tfe_mgr_print_io_bufs(
			hw_cmd_args->u.pf_args.pf_data.packet,
			hw_mgr->mgr_common.img_iommu_hdl,
			hw_mgr->mgr_common.img_iommu_hdl_secure,
			hw_cmd_args->u.pf_args.buf_info,
			hw_cmd_args->u.pf_args.mem_found);
		cam_tfe_mgr_dump_pf_data(hw_mgr, hw_cmd_args);
		break;
	case CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH:
		if (ctx->last_dump_flush_req_id == ctx->applied_req_id)
@@ -5277,9 +5445,10 @@ static int cam_tfe_hw_mgr_sort_dev_with_caps(
	for (i = 0; i < CAM_TFE_HW_NUM_MAX; i++) {
		if (!tfe_hw_mgr->tfe_devices[i])
			continue;
		if (tfe_hw_mgr->tfe_devices[i]->hw_ops.get_hw_caps) {
			tfe_hw_mgr->tfe_devices[i]->hw_ops.get_hw_caps(
				tfe_hw_mgr->tfe_devices[i]->hw_priv,

		if (tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_ops.get_hw_caps) {
			tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_ops.get_hw_caps(
				tfe_hw_mgr->tfe_devices[i]->hw_intf->hw_priv,
				&tfe_hw_mgr->tfe_dev_caps[i],
				sizeof(tfe_hw_mgr->tfe_dev_caps[i]));
		}
@@ -5406,7 +5575,7 @@ int cam_tfe_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
		rc = cam_tfe_hw_init(&g_tfe_hw_mgr.tfe_devices[i], i);
		if (!rc) {
			struct cam_hw_info *tfe_hw = (struct cam_hw_info *)
				g_tfe_hw_mgr.tfe_devices[i]->hw_priv;
				g_tfe_hw_mgr.tfe_devices[i]->hw_intf->hw_priv;
			struct cam_hw_soc_info *soc_info = &tfe_hw->soc_info;

			j++;
+1 −1
Original line number Diff line number Diff line
@@ -152,7 +152,7 @@ struct cam_tfe_hw_mgr {
	struct cam_isp_hw_mgr          mgr_common;
	struct cam_hw_intf            *tpg_devices[CAM_TOP_TPG_HW_NUM_MAX];
	struct cam_hw_intf            *csid_devices[CAM_TFE_CSID_HW_NUM_MAX];
	struct cam_hw_intf            *tfe_devices[CAM_TFE_HW_NUM_MAX];
	struct cam_isp_hw_intf_data   *tfe_devices[CAM_TFE_HW_NUM_MAX];
	struct cam_soc_reg_map        *cdm_reg_map[CAM_TFE_HW_NUM_MAX];
	struct mutex                   ctx_mutex;
	atomic_t                       active_ctx_cnt;
+1 −0
Original line number Diff line number Diff line
@@ -179,6 +179,7 @@ enum cam_tfe_csid_cmd_type {
	CAM_TFE_CSID_SET_CSID_DEBUG,
	CAM_TFE_CSID_SOF_IRQ_DEBUG,
	CAM_TFE_CSID_CMD_GET_REG_DUMP,
	CAM_TFE_CSID_LOG_ACQUIRE_DATA,
	CAM_TFE_CSID_CMD_MAX,
};

+1 −1
Original line number Diff line number Diff line
@@ -248,6 +248,6 @@ struct cam_tfe_irq_evt_payload {
 *                          successful initialization
 * @hw_idx:                 Index of TFE HW
 */
int cam_tfe_hw_init(struct cam_hw_intf **tfe_hw, uint32_t hw_idx);
int cam_tfe_hw_init(struct cam_isp_hw_intf_data **tfe_hw, uint32_t hw_idx);

#endif /* _CAM_TFE_HW_INTF_H_ */
Loading