drivers/media/platform/msm/vidc/msm_smem.c (+532 −107)

@@ -17,173 +17,598 @@

Removed: the ION-era scaffolding that this file still carried as stubs — struct smem_client, a local copy of struct ion_handle, the "#define ion_phys_addr_t dma_addr_t" alias, the msm_ion_* helpers (get/put device address, get/put dma-buf, get/put handle, map/unmap, cache operations), alloc_ion_mem()/free_ion_mem(), ion_new_client()/ion_delete_client(), and the client-based msm_smem_* entry points (msm_smem_new_client(), msm_smem_delete_client(), msm_smem_get_handle(), msm_smem_put_handle()), all of which only returned -ENODEV or ERR_PTR(-ENODEV). They are replaced by the dma-buf based implementation below.

#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <linux/ion_kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "msm_vidc.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_resources.h"

static int msm_dma_get_device_address(struct dma_buf *dbuf, unsigned long align,
    dma_addr_t *iova, unsigned long *buffer_size, unsigned long flags,
    enum hal_buffer buffer_type, unsigned long session_type,
    struct msm_vidc_platform_resources *res,
    struct dma_mapping_info *mapping_info)
{
    int rc = 0;
    struct dma_buf_attachment *attach;
    struct sg_table *table = NULL;
    struct context_bank_info *cb = NULL;

    if (!dbuf || !iova || !buffer_size || !mapping_info) {
        dprintk(VIDC_ERR, "Invalid params: %pK, %pK, %pK, %pK\n",
            dbuf, iova, buffer_size, mapping_info);
        return -EINVAL;
    }

    if (is_iommu_present(res)) {
        cb = msm_smem_get_context_bank(session_type,
            (flags & SMEM_SECURE), res, buffer_type);
        if (!cb) {
            dprintk(VIDC_ERR,
                "%s: Failed to get context bank device\n", __func__);
            rc = -EIO;
            goto mem_map_failed;
        }

        /* Check if the dmabuf size matches expected size */
        if (dbuf->size < *buffer_size) {
            rc = -EINVAL;
            dprintk(VIDC_ERR,
                "Size mismatch: Dmabuf size: %zu Expected Size: %lu",
                dbuf->size, *buffer_size);
            msm_vidc_res_handle_fatal_hw_error(res, true);
            goto mem_buf_size_mismatch;
        }

        /* Prepare a dma buf for dma on the given device */
        attach = dma_buf_attach(dbuf, cb->dev);
        if (IS_ERR_OR_NULL(attach)) {
            rc = PTR_ERR(attach) ?: -ENOMEM;
            dprintk(VIDC_ERR, "Failed to attach dmabuf\n");
            goto mem_buf_attach_failed;
        }

        /*
         * Get the scatterlist for the given attachment.
         * Mapping of sg is taken care by map attachment.
         */
        attach->dma_map_attrs = DMA_ATTR_DELAYED_UNMAP;
        if (res->sys_cache_present)
            attach->dma_map_attrs |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;

        table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(table)) {
            rc = PTR_ERR(table) ?: -ENOMEM;
            dprintk(VIDC_ERR, "Failed to map table\n");
            goto mem_map_table_failed;
        }

        /* debug traces need to be updated later */
        trace_msm_smem_buffer_iommu_op_start("MAP", 0, 0, align,
            *iova, *buffer_size);

        if (table->sgl) {
            *iova = table->sgl->dma_address;
            *buffer_size = table->sgl->dma_length;
        } else {
            dprintk(VIDC_ERR, "sgl is NULL\n");
            rc = -ENOMEM;
            goto mem_map_sg_failed;
        }

        mapping_info->dev = cb->dev;
        mapping_info->mapping = cb->mapping;
        mapping_info->table = table;
        mapping_info->attach = attach;
        mapping_info->buf = dbuf;
        mapping_info->cb_info = (void *)cb;

        trace_msm_smem_buffer_iommu_op_end("MAP", 0, 0, align,
            *iova, *buffer_size);
    } else {
        dprintk(VIDC_DBG, "iommu not present, use phys mem addr\n");
    }

    return 0;

mem_map_sg_failed:
    dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
mem_map_table_failed:
    dma_buf_detach(dbuf, attach);
mem_buf_size_mismatch:
mem_buf_attach_failed:
    dma_buf_put(dbuf);
mem_map_failed:
    return rc;
}
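For context, the mapping path above follows the standard kernel dma-buf importer sequence. Below is a minimal, hedged sketch of that sequence on its own (not part of this patch); "my_dev" and "example_import_dma_buf" are hypothetical names supplied only for illustration.

/*
 * Minimal dma-buf importer sketch: attach a buffer to a device, map the
 * attachment to obtain an sg_table, read the device-visible address, then
 * tear everything down in reverse order.
 */
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

static int example_import_dma_buf(struct dma_buf *dbuf, struct device *my_dev,
    dma_addr_t *iova)
{
    struct dma_buf_attachment *attach;
    struct sg_table *table;

    attach = dma_buf_attach(dbuf, my_dev);
    if (IS_ERR(attach))
        return PTR_ERR(attach);

    table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(table)) {
        dma_buf_detach(dbuf, attach);
        return PTR_ERR(table);
    }

    *iova = sg_dma_address(table->sgl);    /* device-visible address */

    dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
    dma_buf_detach(dbuf, attach);
    return 0;
}

The driver's version differs mainly in that it picks the attachment device from a context bank, sets DMA_ATTR_DELAYED_UNMAP, and keeps the attachment/table around in mapping_info for later unmapping.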
static int msm_dma_put_device_address(u32 flags,
    struct dma_mapping_info *mapping_info, enum hal_buffer buffer_type)
{
    int rc = 0;
    struct context_bank_info *cb = NULL;

    if (!mapping_info) {
        dprintk(VIDC_WARN, "Invalid mapping_info\n");
        return -EINVAL;
    }

    if (!mapping_info->dev || !mapping_info->table ||
        !mapping_info->buf || !mapping_info->attach ||
        !mapping_info->cb_info) {
        dprintk(VIDC_WARN, "Invalid params\n");
        return -EINVAL;
    }

    trace_msm_smem_buffer_iommu_op_start("UNMAP", 0, 0, 0, 0, 0);

    msm_dma_unmap_sg(mapping_info->dev, mapping_info->table->sgl,
        mapping_info->table->nents, DMA_BIDIRECTIONAL, mapping_info->buf);
    dma_buf_unmap_attachment(mapping_info->attach, mapping_info->table,
        DMA_BIDIRECTIONAL);
    dma_buf_detach(mapping_info->buf, mapping_info->attach);
    dma_buf_put(mapping_info->buf);

    trace_msm_smem_buffer_iommu_op_end("UNMAP", 0, 0, 0, 0, 0);

    mapping_info->dev = NULL;
    mapping_info->mapping = NULL;
    mapping_info->table = NULL;
    mapping_info->attach = NULL;
    mapping_info->buf = NULL;
    mapping_info->cb_info = NULL;

    return rc;
}

struct dma_buf *msm_smem_get_dma_buf(int fd)
{
    struct dma_buf *dma_buf;

    dma_buf = dma_buf_get(fd);
    if (IS_ERR_OR_NULL(dma_buf)) {
        dprintk(VIDC_ERR, "Failed to get dma_buf for %d, error %ld\n",
            fd, PTR_ERR(dma_buf));
        dma_buf = NULL;
    }

    return dma_buf;
}

void msm_smem_put_dma_buf(void *dma_buf)
{
    if (!dma_buf) {
        dprintk(VIDC_ERR, "%s: NULL dma_buf\n", __func__);
        return;
    }

    dma_buf_put((struct dma_buf *)dma_buf);
}

int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
{
    int rc = 0;
    dma_addr_t iova = 0;
    u32 temp = 0;
    unsigned long buffer_size = 0;
    unsigned long align = SZ_4K;
    unsigned long dma_flags = 0;
    struct dma_buf *dbuf;
    unsigned long ion_flags = 0;

    if (!inst || !smem) {
        dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
            __func__, inst, smem);
        rc = -EINVAL;
        goto exit;
    }

    if (smem->refcount) {
        smem->refcount++;
        goto exit;
    }

    dbuf = msm_smem_get_dma_buf(smem->fd);
    if (!dbuf) {
        rc = -EINVAL;
        goto exit;
    }
    smem->dma_buf = dbuf;

    rc = dma_buf_get_flags(dbuf, &ion_flags);
    if (rc) {
        dprintk(VIDC_ERR, "Failed to get dma buf flags: %d\n", rc);
        goto exit;
    }
    if (ion_flags & ION_FLAG_CACHED)
        smem->flags |= SMEM_CACHED;
    if (ion_flags & ION_FLAG_SECURE)
        smem->flags |= SMEM_SECURE;

    buffer_size = smem->size;
    rc = msm_dma_get_device_address(dbuf, align, &iova, &buffer_size,
        smem->flags, smem->buffer_type, inst->session_type,
        &(inst->core->resources), &smem->mapping_info);
    if (rc) {
        dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
        goto exit;
    }

    temp = (u32)iova;
    if ((dma_addr_t)temp != iova) {
        dprintk(VIDC_ERR, "iova(%pa) truncated to %#x", &iova, temp);
        rc = -EINVAL;
        goto exit;
    }

    smem->device_addr = (u32)iova + smem->offset;
    smem->refcount++;

exit:
    return rc;
}
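To see how the refcounted map/unmap pair is meant to be used, here is a hedged, hypothetical caller sketch; the function name "example_map_plane" and the field values are placeholders, not taken from the driver.

/*
 * Hypothetical caller of the refcounted map/unmap pair. Every successful
 * msm_smem_map_dma_buf() bumps smem->refcount; msm_smem_unmap_dma_buf()
 * drops it and only releases the mapping when the count reaches zero.
 */
static int example_map_plane(struct msm_vidc_inst *inst, int plane_fd,
    unsigned int plane_size)
{
    struct msm_smem smem = {0};
    int rc;

    smem.fd = plane_fd;                 /* dma-buf fd from userspace */
    smem.size = plane_size;
    smem.buffer_type = HAL_BUFFER_INPUT;
    smem.offset = 0;

    rc = msm_smem_map_dma_buf(inst, &smem);
    if (rc)
        return rc;

    /* smem.device_addr now holds the 32-bit IOVA handed to the firmware */

    return msm_smem_unmap_dma_buf(inst, &smem);
}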
int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem)
{
    int rc = 0;

    if (!inst || !smem) {
        dprintk(VIDC_ERR, "%s: Invalid params: %pK %pK\n",
            __func__, inst, smem);
        rc = -EINVAL;
        goto exit;
    }

    if (smem->refcount) {
        smem->refcount--;
    } else {
        dprintk(VIDC_WARN,
            "unmap called while refcount is zero already\n");
        return -EINVAL;
    }

    if (smem->refcount)
        goto exit;

    rc = msm_dma_put_device_address(smem->flags, &smem->mapping_info,
        smem->buffer_type);
    if (rc) {
        dprintk(VIDC_ERR, "Failed to put device address: %d\n", rc);
        goto exit;
    }

    msm_smem_put_dma_buf(smem->dma_buf);

    smem->device_addr = 0x0;
    smem->dma_buf = NULL;

exit:
    return rc;
}

static int get_secure_flag_for_buffer_type(u32 session_type,
    enum hal_buffer buffer_type)
{
    switch (buffer_type) {
    case HAL_BUFFER_INPUT:
        if (session_type == MSM_VIDC_ENCODER)
            return ION_FLAG_CP_PIXEL;
        else
            return ION_FLAG_CP_BITSTREAM;
    case HAL_BUFFER_OUTPUT:
    case HAL_BUFFER_OUTPUT2:
        if (session_type == MSM_VIDC_ENCODER)
            return ION_FLAG_CP_BITSTREAM;
        else
            return ION_FLAG_CP_PIXEL;
    case HAL_BUFFER_INTERNAL_SCRATCH:
        return ION_FLAG_CP_BITSTREAM;
    case HAL_BUFFER_INTERNAL_SCRATCH_1:
        return ION_FLAG_CP_NON_PIXEL;
    case HAL_BUFFER_INTERNAL_SCRATCH_2:
        return ION_FLAG_CP_PIXEL;
    case HAL_BUFFER_INTERNAL_PERSIST:
        return ION_FLAG_CP_BITSTREAM;
    case HAL_BUFFER_INTERNAL_PERSIST_1:
        return ION_FLAG_CP_NON_PIXEL;
    default:
        WARN(1, "No matching secure flag for buffer type : %x\n",
            buffer_type);
        return -EINVAL;
    }
}
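To make the flag selection concrete, a small illustration-only snippet (the wrapper function is hypothetical): for a secure encoder session the input is raw pixel data, while for a decoder the input is a compressed bitstream, and the content-protection flag follows accordingly.

/* Illustration only: resolving the CP flag for secure input buffers. */
static void example_cp_flag(void)
{
    int cp_flag;

    /* Encoder input frames are raw pixels, so they get the pixel CP flag. */
    cp_flag = get_secure_flag_for_buffer_type(MSM_VIDC_ENCODER,
                                              HAL_BUFFER_INPUT);
    /* cp_flag == ION_FLAG_CP_PIXEL */

    /* A decoder's input is a compressed bitstream. */
    cp_flag = get_secure_flag_for_buffer_type(MSM_VIDC_DECODER,
                                              HAL_BUFFER_INPUT);
    /* cp_flag == ION_FLAG_CP_BITSTREAM */
}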
static int alloc_dma_mem(size_t size, u32 align, u32 flags,
    enum hal_buffer buffer_type, int map_kernel,
    struct msm_vidc_platform_resources *res, u32 session_type,
    struct msm_smem *mem)
{
    dma_addr_t iova = 0;
    unsigned long buffer_size = 0;
    unsigned long heap_mask = 0;
    int rc = 0;
    int ion_flags = 0;
    struct dma_buf *dbuf = NULL;
    unsigned long page_count = 0;
    unsigned long mapped_pages = 0;
    void *kvaddr = NULL;

    if (!res) {
        dprintk(VIDC_ERR, "%s: NULL res\n", __func__);
        return -EINVAL;
    }

    align = ALIGN(align, SZ_4K);
    size = ALIGN(size, SZ_4K);

    if (is_iommu_present(res)) {
        heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID);
    } else {
        dprintk(VIDC_DBG,
            "allocate shared memory from adsp heap size %zx align %d\n",
            size, align);
        heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
    }

    if (flags & SMEM_CACHED)
        ion_flags |= ION_FLAG_CACHED;

    if (flags & SMEM_SECURE) {
        int secure_flag = get_secure_flag_for_buffer_type(session_type,
            buffer_type);

        if (secure_flag < 0) {
            rc = secure_flag;
            goto fail_shared_mem_alloc;
        }

        ion_flags |= ION_FLAG_SECURE | secure_flag;
        heap_mask = ION_HEAP(ION_SECURE_HEAP_ID);

        if (res->slave_side_cp) {
            heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
            size = ALIGN(size, SZ_1M);
            align = ALIGN(size, SZ_1M);
        }
    }

    trace_msm_smem_buffer_dma_op_start("ALLOC", (u32)buffer_type,
        heap_mask, size, align, flags, map_kernel);

    dbuf = ion_alloc(size, heap_mask, ion_flags);
    if (IS_ERR_OR_NULL(dbuf)) {
        dprintk(VIDC_ERR,
            "Failed to allocate shared memory = %zx, %#x\n",
            size, flags);
        rc = -ENOMEM;
        goto fail_shared_mem_alloc;
    }

    trace_msm_smem_buffer_dma_op_end("ALLOC", (u32)buffer_type,
        heap_mask, size, align, flags, map_kernel);

    mem->flags = flags;
    mem->buffer_type = buffer_type;
    mem->offset = 0;
    mem->size = size;
    mem->dma_buf = dbuf;

    if (map_kernel) {
        mem->pages = size / PAGE_SIZE;
        for (page_count = 1; page_count <= mem->pages; page_count++) {
            kvaddr = dma_buf_kmap(dbuf, page_count);
            if (IS_ERR_OR_NULL(kvaddr)) {
                dprintk(VIDC_ERR,
                    "Failed to map shared mem in kernel\n");
                rc = -EIO;
                goto fail_map;
            }
            if (page_count == 1)
                mem->kvaddr = kvaddr;
        }
    } else {
        mem->kvaddr = NULL;
        mem->pages = 0;
    }

    rc = msm_dma_get_device_address(dbuf, align, &iova, &buffer_size,
        flags, buffer_type, session_type, res, &mem->mapping_info);
    if (rc) {
        dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
        goto fail_device_address;
    }

    mem->device_addr = (u32)iova;
    if ((dma_addr_t)mem->device_addr != iova) {
        dprintk(VIDC_ERR, "iova(%pa) truncated to %#x",
            &iova, mem->device_addr);
        goto fail_device_address;
    }

    dprintk(VIDC_DBG,
        "%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x, flags = %#lx\n",
        __func__, mem->dma_buf, mem->device_addr, mem->size,
        mem->kvaddr, mem->buffer_type, mem->flags);

    return rc;

fail_device_address:
    mapped_pages = mem->pages;
fail_map:
    mapped_pages = page_count;
    if (mem->kvaddr) {
        kvaddr = mem->kvaddr;
        for (page_count = 1; page_count < mapped_pages; page_count++) {
            dma_buf_kunmap(mem->dma_buf, page_count, kvaddr);
            kvaddr += PAGE_SIZE;
        }
        mem->pages = 0;
        mem->kvaddr = NULL;
    }
    dma_buf_put(dbuf);
fail_shared_mem_alloc:
    return rc;
}

static int free_dma_mem(struct msm_smem *mem)
{
    int rc = 0;
    void *kvaddr;
    unsigned long page_count = 0;

    dprintk(VIDC_DBG,
        "%s: dma_buf = %pK, device_addr = %x, size = %d, kvaddr = %pK, buffer_type = %#x\n",
        __func__, mem->dma_buf, mem->device_addr, mem->size,
        mem->kvaddr, mem->buffer_type);

    if (mem->device_addr) {
        msm_dma_put_device_address(mem->flags, &mem->mapping_info,
            mem->buffer_type);
        mem->device_addr = 0x0;
    }

    if (mem->kvaddr) {
        kvaddr = mem->kvaddr;
        for (page_count = 1; page_count <= mem->pages; page_count++) {
            dma_buf_kunmap(mem->dma_buf, page_count, kvaddr);
            kvaddr += PAGE_SIZE;
        }
        mem->pages = 0;
        mem->kvaddr = NULL;
    }

    if (mem->dma_buf) {
        trace_msm_smem_buffer_dma_op_start("FREE", (u32)mem->buffer_type,
            -1, mem->size, -1, mem->flags, -1);
        dma_buf_put(mem->dma_buf);
        mem->dma_buf = NULL;
        trace_msm_smem_buffer_dma_op_end("FREE", (u32)mem->buffer_type,
            -1, mem->size, -1, mem->flags, -1);
    }

    return rc;
}
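The kernel-mapping loop above relies on dma_buf_kmap()/dma_buf_kunmap(), which map and unmap one page at a time. A minimal sketch of the pairing for a single page follows, assuming a kernel that still provides this interface; the function name is hypothetical.

/*
 * Illustration only: map one page of a dma-buf into the kernel, touch it,
 * then unmap it. The driver above walks every page of the allocation with
 * the same calls and keeps the address of the first mapped page in kvaddr.
 */
static int example_peek_page(struct dma_buf *dbuf)
{
    void *vaddr;

    vaddr = dma_buf_kmap(dbuf, 0);          /* page index 0 */
    if (IS_ERR_OR_NULL(vaddr))
        return -EIO;

    /* ... read or write the first PAGE_SIZE bytes via vaddr ... */

    dma_buf_kunmap(dbuf, 0, vaddr);
    return 0;
}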
int msm_smem_cache_operations(struct dma_buf *dbuf, unsigned long offset,
    unsigned long size, enum smem_cache_ops cache_op)
{
    int rc = 0;
    unsigned long flags = 0;
    int msm_cache_ops = 0;

    if (!dbuf) {
        dprintk(VIDC_ERR, "%s: NULL dma_buf\n", __func__);
        return -EINVAL;
    }

    /* Identify the get flags & offset based cache operation */
    rc = dma_buf_get_flags(dbuf, &flags);
    if (rc) {
        dprintk(VIDC_ERR, "Failed to get dma buf flags: %d\n", rc);
        goto exit;
    }
    if (!(flags & ION_FLAG_CACHED))
        goto exit;

    /* Partial clean of dma buf api is in progress */
    switch (cache_op) {
    case SMEM_CACHE_CLEAN:
    case SMEM_CACHE_INVALIDATE:
    case SMEM_CACHE_CLEAN_INVALIDATE:
        dma_buf_begin_cpu_access(dbuf, DMA_BIDIRECTIONAL);
        dma_buf_end_cpu_access(dbuf, DMA_BIDIRECTIONAL);
        break;
    default:
        dprintk(VIDC_ERR, "%s: cache (%d) operation not supported\n",
            __func__, cache_op);
        rc = -EINVAL;
        goto exit;
    }

    if (rc) {
        dprintk(VIDC_ERR,
            "%s: cache operation failed dma_buf %pK, %d, offset %lu, size %lu, msm_cache_ops %u\n",
            __func__, rc, dbuf, offset, size, msm_cache_ops);
        goto exit;
    }

exit:
    return rc;
}

int msm_smem_alloc(size_t size, u32 align, u32 flags,
    enum hal_buffer buffer_type, int map_kernel, void *res,
    u32 session_type, struct msm_smem *smem)
{
    int rc = 0;

    if (!smem || !size) {
        dprintk(VIDC_ERR, "%s: NULL smem or %d size\n",
            __func__, (u32)size);
        return -EINVAL;
    }

    rc = alloc_dma_mem(size, align, flags, buffer_type, map_kernel,
        (struct msm_vidc_platform_resources *)res, session_type, smem);

    return rc;
}

int msm_smem_free(struct msm_smem *smem)
{
    int rc = 0;

    if (!smem) {
        dprintk(VIDC_ERR, "NULL smem passed\n");
        return -EINVAL;
    }
    rc = free_dma_mem(smem);

    return rc;
}
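As the comment notes, cache maintenance now goes through the generic dma-buf CPU-access hooks. A hedged sketch of the usual pattern is below (illustration only; the function name is hypothetical): the begin/end calls bracket actual CPU access, whereas the driver above currently issues the pair back to back with DMA_BIDIRECTIONAL, using it purely as a clean/invalidate.

/*
 * Illustration only: the conventional begin/end CPU-access bracket around
 * a CPU read of a device-written dma-buf.
 */
static void example_cpu_read(struct dma_buf *dbuf)
{
    if (dma_buf_begin_cpu_access(dbuf, DMA_FROM_DEVICE))
        return;

    /* ... CPU reads of the (kernel-mapped) buffer contents go here ... */

    dma_buf_end_cpu_access(dbuf, DMA_FROM_DEVICE);
}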
struct context_bank_info *msm_smem_get_context_bank(u32 session_type,
    bool is_secure, struct msm_vidc_platform_resources *res,
    enum hal_buffer buffer_type)
{
    struct context_bank_info *cb = NULL, *match = NULL;

    /*
     * HAL_BUFFER_INPUT is directly mapped to bitstream CB in DT
     * as the buffer type structure was initially designed
     * just for decoder. For Encoder, input should be mapped to
     * yuvpixel CB. So swap the buffer types just in this local scope.
     */
    if (is_secure && session_type == MSM_VIDC_ENCODER) {
        if (buffer_type == HAL_BUFFER_INPUT)
            buffer_type = HAL_BUFFER_OUTPUT;
        else if (buffer_type == HAL_BUFFER_OUTPUT)
            buffer_type = HAL_BUFFER_INPUT;
    }

    list_for_each_entry(cb, &res->context_banks, list) {
        if (cb->is_secure == is_secure &&
            cb->buffer_type & buffer_type) {
            match = cb;
            break;
        }
    }
    if (!match)
        dprintk(VIDC_ERR,
            "%s: cb not found for buffer_type %x, is_secure %d\n",
            __func__, buffer_type, is_secure);

    return match;
}

drivers/media/platform/msm/vidc/msm_vidc.c (+1 −10)

@@ -460,7 +460,7 @@ int msm_vidc_release_buffer(void *instance, int type, unsigned int index)
     rc = msm_comm_try_state(inst, MSM_VIDC_RELEASE_RESOURCES_DONE);
     if (rc) {
         dprintk(VIDC_ERR,
-            "%s: Failed to move inst: %pK to rel res done",
+            "%s: Failed to move inst: %pK to rel res done\n",
             __func__, inst);
     }
 }

@@ -1645,12 +1645,6 @@ void *msm_vidc_open(int core_id, int session_type)
         i <= SESSION_MSG_INDEX(SESSION_MSG_END); i++) {
         init_completion(&inst->completions[i]);
     }
-    inst->mem_client = msm_smem_new_client(SMEM_DMA,
-            &inst->core->resources, session_type);
-    if (!inst->mem_client) {
-        dprintk(VIDC_ERR, "Failed to create memory client\n");
-        goto fail_mem_client;
-    }
     if (session_type == MSM_VIDC_DECODER) {
         msm_vdec_inst_init(inst);

@@ -1716,8 +1710,6 @@ void *msm_vidc_open(int core_id, int session_type)
     vb2_queue_release(&inst->bufq[CAPTURE_PORT].vb2_bufq);
 fail_bufq_capture:
     msm_comm_ctrl_deinit(inst);
-    msm_smem_delete_client(inst->mem_client);
-fail_mem_client:
     mutex_destroy(&inst->sync_lock);
     mutex_destroy(&inst->bufq[CAPTURE_PORT].lock);
     mutex_destroy(&inst->bufq[OUTPUT_PORT].lock);

@@ -1898,7 +1890,6 @@ int msm_vidc_close(void *instance)
     }
     msm_comm_session_clean(inst);
-    msm_smem_delete_client(inst->mem_client);
     kref_put(&inst->kref, close_helper);
     return 0;
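The context-bank lookup above is a simple bitmask match against the banks parsed from devicetree. A hedged illustration with two hypothetical entries (stand-ins for what would normally populate res->context_banks):

/*
 * Illustration only: hypothetical context banks. A non-secure request for
 * HAL_BUFFER_OUTPUT matches the first entry because
 * (cb->buffer_type & buffer_type) is non-zero; a secure HAL_BUFFER_OUTPUT
 * request matches neither, and msm_smem_get_context_bank() returns NULL
 * after logging an error.
 */
static struct context_bank_info example_cbs[] = {
    { .is_secure = false,
      .buffer_type = HAL_BUFFER_INPUT | HAL_BUFFER_OUTPUT },
    { .is_secure = true,
      .buffer_type = HAL_BUFFER_INPUT },
};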
drivers/media/platform/msm/vidc/msm_vidc_common.c (+17 −24)

@@ -1539,13 +1539,13 @@ static void handle_session_init_done(enum hal_command_response cmd, void *data)
     print_cap("max_work_modes", &inst->capability.max_work_modes);
     print_cap("ubwc_cr_stats", &inst->capability.ubwc_cr_stats);
-    dprintk(VIDC_DBG, "profile count : %u",
+    dprintk(VIDC_DBG, "profile count : %u\n",
         inst->capability.profile_level.profile_count);
     for (i = 0; i < inst->capability.profile_level.profile_count; i++) {
         profile_level = &inst->capability.profile_level.profile_level[i];
-        dprintk(VIDC_DBG, "profile : %u ", profile_level->profile);
-        dprintk(VIDC_DBG, "level : %u ", profile_level->level);
+        dprintk(VIDC_DBG, "profile : %u\n", profile_level->profile);
+        dprintk(VIDC_DBG, "level : %u\n", profile_level->level);
     }
     signal_session_msg_receipt(cmd, inst);

@@ -3342,7 +3342,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
     int rc = 0;
     struct internal_buf *binfo = NULL;
-    u32 smem_flags = 0, buffer_size;
+    u32 smem_flags = SMEM_UNCACHED, buffer_size;
     struct hal_buffer_requirements *output_buf, *extradata_buf;
     int i;
     struct hfi_device *hdev;

@@ -3568,7 +3568,7 @@ static int allocate_and_set_internal_bufs(struct msm_vidc_inst *inst,
     struct msm_vidc_list *buf_list)
 {
     struct internal_buf *binfo;
-    u32 smem_flags = 0;
+    u32 smem_flags = SMEM_UNCACHED;
     int rc = 0;
     int i = 0;

@@ -3837,7 +3837,7 @@ int msm_vidc_comm_cmd(void *instance, union msm_v4l2_cmd *cmd)
     case V4L2_DEC_CMD_STOP:
     {
         struct eos_buf *binfo = NULL;
-        u32 smem_flags = 0;
+        u32 smem_flags = SMEM_UNCACHED;
         get_inst(inst->core, inst);

@@ -5318,8 +5318,9 @@ int msm_comm_smem_alloc(struct msm_vidc_inst *inst,
         dprintk(VIDC_ERR, "%s: invalid inst: %pK\n", __func__, inst);
         return -EINVAL;
     }
-    rc = msm_smem_alloc(inst->mem_client, size, align,
-        flags, buffer_type, map_kernel, smem);
+    rc = msm_smem_alloc(size, align, flags, buffer_type, map_kernel,
+        &(inst->core->resources), inst->session_type, smem);
     return rc;
 }

@@ -5330,7 +5331,7 @@ void msm_comm_smem_free(struct msm_vidc_inst *inst, struct msm_smem *mem)
             "%s: invalid params: %pK %pK\n", __func__, inst, mem);
         return;
     }
-    msm_smem_free(inst->mem_client, mem);
+    msm_smem_free(mem);
 }

@@ -5341,7 +5342,7 @@ int msm_comm_smem_cache_operations(struct msm_vidc_inst *inst,
             "%s: invalid params: %pK %pK\n", __func__, inst, mem);
         return -EINVAL;
     }
-    return msm_smem_cache_operations(inst->mem_client, NULL,
+    return msm_smem_cache_operations(mem->dma_buf,
         mem->offset, mem->size, cache_ops);
 }

@@ -5349,8 +5350,6 @@ int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
     struct v4l2_buffer *b)
 {
     int rc = 0, i;
-    void *dma_buf;
-    void *handle;
     bool skip;

     if (!inst || !b) {

@@ -5362,10 +5361,9 @@ int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
     for (i = 0; i < b->length; i++) {
         unsigned long offset, size;
         enum smem_cache_ops cache_ops;
+        struct dma_buf *dbuf;

-        dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
-        handle = msm_smem_get_handle(inst->mem_client, dma_buf);
+        dbuf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
         offset = b->m.planes[i].data_offset;
         size = b->m.planes[i].length;
         cache_ops = SMEM_CACHE_INVALIDATE;

@@ -5400,15 +5398,14 @@ int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
         if (!skip) {
-            rc = msm_smem_cache_operations(inst->mem_client, handle,
+            rc = msm_smem_cache_operations(dbuf,
                     offset, size, cache_ops);
             if (rc)
                 print_v4l2_buffer(VIDC_ERR,
                     "qbuf cache ops failed", inst, b);
         }
-        msm_smem_put_handle(inst->mem_client, handle);
-        msm_smem_put_dma_buf(dma_buf);
+        msm_smem_put_dma_buf(dbuf);
     }

     return rc;

@@ -5418,8 +5415,7 @@ int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
     struct v4l2_buffer *b)
 {
     int rc = 0, i;
-    void *dma_buf;
-    void *handle;
+    struct dma_buf *dma_buf;
     bool skip;

     if (!inst || !b) {

@@ -5433,8 +5429,6 @@ int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
         enum smem_cache_ops cache_ops;

         dma_buf = msm_smem_get_dma_buf(b->m.planes[i].m.fd);
-        handle = msm_smem_get_handle(inst->mem_client, dma_buf);
         offset = b->m.planes[i].data_offset;
         size = b->m.planes[i].length;
         cache_ops = SMEM_CACHE_INVALIDATE;

@@ -5461,14 +5455,13 @@ int msm_comm_dqbuf_cache_operations(struct msm_vidc_inst *inst,
         if (!skip) {
-            rc = msm_smem_cache_operations(inst->mem_client, handle,
+            rc = msm_smem_cache_operations(dma_buf,
                     offset, size, cache_ops);
             if (rc)
                 print_v4l2_buffer(VIDC_ERR,
                     "dqbuf cache ops failed", inst, b);
         }
-        msm_smem_put_handle(inst->mem_client, handle);
         msm_smem_put_dma_buf(dma_buf);
     }
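Summarizing the caller-side change in the qbuf/dqbuf cache paths, a hedged sketch of the new per-plane sequence follows; "example_flush_plane" and the flags passed are placeholders for illustration.

/*
 * Illustration only: per-plane cache maintenance after this change. No
 * smem_client or ION handle is involved any more; the dma-buf resolved
 * from the plane fd is passed straight to the cache helper.
 */
static int example_flush_plane(int plane_fd, unsigned long offset,
    unsigned long size)
{
    struct dma_buf *dbuf;
    int rc;

    dbuf = msm_smem_get_dma_buf(plane_fd);   /* takes a dma-buf reference */
    if (!dbuf)
        return -EINVAL;

    rc = msm_smem_cache_operations(dbuf, offset, size,
            SMEM_CACHE_CLEAN_INVALIDATE);

    msm_smem_put_dma_buf(dbuf);              /* drop the reference */
    return rc;
}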
drivers/media/platform/msm/vidc/msm_vidc_internal.h (+12 −16)

@@ -382,7 +382,6 @@ struct msm_vidc_inst {
     struct msm_vidc_list etb_data;
     struct msm_vidc_list fbd_data;
     struct buffer_requirements buff_req;
-    struct smem_client *mem_client;
     struct v4l2_ctrl_handler ctrl_handler;
     struct completion completions[SESSION_MSG_END - SESSION_MSG_START + 1];
     struct v4l2_ctrl **cluster;

@@ -454,24 +453,21 @@ struct msm_vidc_buffer {
 };

 void msm_comm_handle_thermal_event(void);
-void *msm_smem_new_client(enum smem_type mtype,
-    void *platform_resources, enum session_type stype);
-int msm_smem_alloc(struct smem_client *client, size_t size, u32 align,
-    u32 flags, enum hal_buffer buffer_type, int map_kernel,
-    struct msm_smem *smem);
-int msm_smem_free(void *clt, struct msm_smem *mem);
-void msm_smem_delete_client(void *clt);
-struct context_bank_info *msm_smem_get_context_bank(void *clt,
-    bool is_secure, enum hal_buffer buffer_type);
+int msm_smem_alloc(size_t size, u32 align, u32 flags,
+    enum hal_buffer buffer_type, int map_kernel,
+    void *res, u32 session_type, struct msm_smem *smem);
+int msm_smem_free(struct msm_smem *smem);
+int msm_smem_cache_operations(struct dma_buf *dbuf,
+    unsigned long offset, unsigned long size, enum smem_cache_ops cache_op);
+struct context_bank_info *msm_smem_get_context_bank(u32 session_type,
+    bool is_secure, struct msm_vidc_platform_resources *res,
+    enum hal_buffer buffer_type);
 int msm_smem_map_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
 int msm_smem_unmap_dma_buf(struct msm_vidc_inst *inst, struct msm_smem *smem);
-void *msm_smem_get_dma_buf(int fd);
+struct dma_buf *msm_smem_get_dma_buf(int fd);
 void msm_smem_put_dma_buf(void *dma_buf);
-void *msm_smem_get_handle(struct smem_client *client, void *dma_buf);
-void msm_smem_put_handle(struct smem_client *client, void *handle);
-int msm_smem_cache_operations(struct smem_client *client, void *handle,
-    unsigned long offset, unsigned long size, enum smem_cache_ops cache_op);
 void msm_vidc_fw_unload_handler(struct work_struct *work);
 /*
  * XXX: normally should be in msm_vidc.h, but that's meant for public APIs,

drivers/media/platform/msm/vidc/venus_hfi.c (+20 −35) — changes collapsed in this view (preview size limit exceeded).