drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c +9 −0

@@ -643,6 +643,14 @@ static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
 	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;

 	request_id = (uintptr_t)config_args->priv;
+	if (request_id <= ctx_data->last_flush_req) {
+		CAM_WARN(CAM_JPEG,
+			"Anomaly submitting flushed req %llu [last_flush %llu]",
+			request_id, ctx_data->last_flush_req);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
 	p_cfg_req->req_id = request_id;
 	p_cfg_req->num_hw_entry_processed = 0;
 	hw_update_entries = config_args->hw_update_entries;

@@ -1085,6 +1093,7 @@ static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
 		return -EINVAL;
 	}

+	ctx_data->last_flush_req = flush_args->last_flush_req;
 	switch (flush_args->flush_type) {
 	case CAM_FLUSH_TYPE_ALL:
 		rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
drivers/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h +2 −0

@@ -98,6 +98,7 @@ struct cam_jpeg_hw_cfg_req {
  * @in_use: Flag for context usage
  * @wait_complete: Completion info
  * @cdm_cmd: Cdm cmd submitted for that context.
+ * @last_flush_req: req id which was flushed last.
  */
 struct cam_jpeg_hw_ctx_data {
 	void *context_priv;

@@ -107,6 +108,7 @@ struct cam_jpeg_hw_ctx_data {
 	bool in_use;
 	struct completion wait_complete;
 	struct cam_cdm_bl_request *cdm_cmd;
+	uint64_t last_flush_req;
 };

 /**
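Taken together, the two JPEG changes implement a simple ordering guard: the flush path records the highest request id covered by the flush in the new last_flush_req field, and the config path rejects any request at or below that boundary instead of programming it into hardware. A minimal userspace sketch of the pattern, with illustrative names (struct ctx, flush_up_to, and submit_request are not the driver's identifiers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative context: only the field the guard relies on. */
struct ctx {
	uint64_t last_flush_req;	/* highest request id covered by a flush */
};

/* Flush path: record the boundary so late submissions can be rejected. */
static void flush_up_to(struct ctx *c, uint64_t last_flush_req)
{
	c->last_flush_req = last_flush_req;
}

/* Config path: 0 if the request may proceed, -1 if it was already flushed. */
static int submit_request(struct ctx *c, uint64_t request_id)
{
	if (request_id <= c->last_flush_req) {
		fprintf(stderr, "rejecting flushed req %llu (last_flush %llu)\n",
			(unsigned long long)request_id,
			(unsigned long long)c->last_flush_req);
		return -1;	/* mirrors the driver's -EINVAL */
	}
	/* ... program the hardware for request_id ... */
	return 0;
}

Rejecting at config time closes the window where a request that was already queued when the flush arrived would otherwise still reach the JPEG core after the flush completed.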
drivers/cam_req_mgr/cam_mem_mgr.c +12 −4

@@ -244,8 +244,9 @@ int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
 		return -ENOENT;

 	if (!tbl.bufq[idx].active) {
-		CAM_ERR(CAM_MEM, "Buffer at idx=%d is already unmapped,",
-			idx);
+		CAM_ERR(CAM_MEM,
+			"Buffer at idx=%d is already unmapped, vaddr 0x%llx unmaped_vaddr 0x%llx",
+			idx, tbl.bufq[idx].vaddr, tbl.bufq[idx].unmaped_vaddr);
 		return -EAGAIN;
 	}

@@ -757,6 +758,7 @@ int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
 	tbl.bufq[idx].kmdvaddr = kvaddr;
 	tbl.bufq[idx].vaddr = hw_vaddr;
+	tbl.bufq[idx].unmaped_vaddr = 0;
 	tbl.bufq[idx].dma_buf = dmabuf;
 	tbl.bufq[idx].len = cmd->len;
 	tbl.bufq[idx].num_hdl = cmd->num_hdl;

@@ -883,10 +885,12 @@ int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
 		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);

 	tbl.bufq[idx].kmdvaddr = 0;
-	if (cmd->num_hdl > 0)
+	if (cmd->num_hdl > 0) {
 		tbl.bufq[idx].vaddr = hw_vaddr;
-	else
+		tbl.bufq[idx].unmaped_vaddr = 0;
+	} else {
 		tbl.bufq[idx].vaddr = 0;
+	}

 	tbl.bufq[idx].dma_buf = dmabuf;
 	tbl.bufq[idx].len = len;

@@ -1020,6 +1024,7 @@ static int cam_mem_mgr_cleanup_table(void)
 		tbl.bufq[i].flags = 0;
 		tbl.bufq[i].buf_handle = -1;
 		tbl.bufq[i].vaddr = 0;
+		tbl.bufq[i].unmaped_vaddr = 0;
 		tbl.bufq[i].len = 0;
 		memset(tbl.bufq[i].hdls, 0,
 			sizeof(int32_t) * tbl.bufq[i].num_hdl);

@@ -1078,6 +1083,7 @@ static int cam_mem_util_unmap(int32_t idx,
 	/* Deactivate the buffer queue to prevent multiple unmap */
 	mutex_lock(&tbl.bufq[idx].q_lock);
 	tbl.bufq[idx].active = false;
+	tbl.bufq[idx].unmaped_vaddr = tbl.bufq[idx].vaddr;
 	tbl.bufq[idx].vaddr = 0;
 	mutex_unlock(&tbl.bufq[idx].q_lock);
 	mutex_unlock(&tbl.m_lock);

@@ -1294,6 +1300,7 @@ int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].kmdvaddr = kvaddr;
 	tbl.bufq[idx].vaddr = iova;
+	tbl.bufq[idx].unmaped_vaddr = 0;
 	tbl.bufq[idx].len = inp->size;
 	tbl.bufq[idx].num_hdl = num_hdl;

@@ -1443,6 +1450,7 @@ int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
 	tbl.bufq[idx].kmdvaddr = 0;
 	tbl.bufq[idx].vaddr = iova;
+	tbl.bufq[idx].unmaped_vaddr = 0;
 	tbl.bufq[idx].len = request_len;
 	tbl.bufq[idx].num_hdl = num_hdl;
drivers/cam_req_mgr/cam_mem_mgr.h +17 −15

@@ -41,6 +41,7 @@ enum cam_smmu_mapping_client {
  * @is_imported: Flag indicating if buffer is imported from an FD in user space
  * @is_internal: Flag indicating kernel allocated buffer
  * @timestamp: Timestamp at which this entry in tbl was made
+ * @unmaped_vaddr: Caches the vaddr after the buffer is unmapped.
  */
 struct cam_mem_buf_queue {
 	struct dma_buf *dma_buf;

@@ -53,6 +54,7 @@ struct cam_mem_buf_queue {
 	size_t len;
 	uint32_t flags;
 	uint64_t vaddr;
+	uint64_t unmaped_vaddr;
 	uintptr_t kmdvaddr;
 	bool active;
 	bool is_imported;
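The memory-manager side is one debugging pattern applied consistently: every fresh mapping clears unmaped_vaddr, and unmap copies vaddr into it before zeroing, so a lookup on a stale handle can log the address the buffer had when it was torn down. A hedged sketch of the pattern with simplified locking (buf_entry and the function names below are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

/* Illustrative table entry; mirrors the fields the patch touches. */
struct buf_entry {
	pthread_mutex_t q_lock;
	bool active;
	uint64_t vaddr;		/* live IOVA while mapped */
	uint64_t unmaped_vaddr;	/* IOVA cached at unmap time, for debugging */
};

static void map_entry(struct buf_entry *e, uint64_t iova)
{
	pthread_mutex_lock(&e->q_lock);
	e->active = true;
	e->vaddr = iova;
	e->unmaped_vaddr = 0;	/* fresh mapping: no stale address to report */
	pthread_mutex_unlock(&e->q_lock);
}

static void unmap_entry(struct buf_entry *e)
{
	pthread_mutex_lock(&e->q_lock);
	e->active = false;
	/* Cache the address before clearing it so stale lookups can log it. */
	e->unmaped_vaddr = e->vaddr;
	e->vaddr = 0;
	pthread_mutex_unlock(&e->q_lock);
}

static int get_io_buf(struct buf_entry *e, uint64_t *iova_out)
{
	if (!e->active) {
		fprintf(stderr,
			"already unmapped, vaddr 0x%llx unmaped_vaddr 0x%llx\n",
			(unsigned long long)e->vaddr,
			(unsigned long long)e->unmaped_vaddr);
		return -1;	/* mirrors the driver's -EAGAIN */
	}
	*iova_out = e->vaddr;
	return 0;
}

The cost is one extra u64 per table entry; in exchange, an opaque "already unmapped" error becomes a log line that identifies which mapping a use-after-unmap refers to.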