drivers/misc/qseecom.c  +76 −9

@@ -67,6 +67,9 @@
 #define CE_CLK_DIV 1000000
 
 #define QSEECOM_MAX_SG_ENTRY 512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
+	(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
 #define QSEECOM_INVALID_KEY_ID 0xff
 
 /* Save partition image hash for authentication check */

@@ -2940,6 +2943,53 @@ err:
 	return -ENOMEM;
 }
 
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+			char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the left sg entries into new allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
+
 static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
 			struct qseecom_dev_handle *data)
 {

@@ -3008,11 +3058,27 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
 			goto err;
 		}
 		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
-			pr_err("Num of scattered entries");
-			pr_err(" (%d) is greater than max supported %d\n",
+			pr_warn("Num of scattered entries");
+			pr_warn(" (%d) is greater than %d\n",
 				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
-			goto err;
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.pdev,
+						data->client.sec_buf_fd[i].size,
+						data->client.sec_buf_fd[i].vbase,
+						data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			goto cleanup;
 		}
 		sg = sg_ptr->sgl;
 		if (sg_ptr->nents == 1) {
 			uint64_t *update_64bit;

@@ -3031,10 +3097,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
 			(req->ifd_data[i].fd > 0)) {
 			if ((req->cmd_req_len <
-				SG_ENTRY_SZ * sg_ptr->nents) ||
+				SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
 				(req->ifd_data[i].cmd_buf_offset >
 				(req->cmd_req_len -
-				SG_ENTRY_SZ * sg_ptr->nents))) {
+				SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
 				pr_err("Invalid offset = 0x%x\n",
 					req->ifd_data[i].cmd_buf_offset);
 				goto err;

@@ -3044,10 +3110,10 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
 			(lstnr_resp->ifd_data[i].fd > 0)) {
 			if ((lstnr_resp->resp_len <
-				SG_ENTRY_SZ * sg_ptr->nents) ||
+				SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
 				(lstnr_resp->ifd_data[i].cmd_buf_offset >
 				(lstnr_resp->resp_len -
-				SG_ENTRY_SZ * sg_ptr->nents))) {
+				SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
 				goto err;
 			}
 		}

@@ -3063,6 +3129,7 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
 			sg = sg_next(sg);
 		}
 	}
+cleanup:
 	if (cleanup)
 		msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
 					NULL, len,
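Taken together, the qseecom.c hunks rework the oversized-SG-list path. Previously, an ion buffer that mapped to more than QSEECOM_MAX_SG_ENTRY (512) scatter-gather entries failed the request outright; now, on the populate path the driver writes a fixed-size version-2 header at the command-buffer offset where the inline entry list used to go and spills the full entry list into a DMA-coherent buffer whose physical address the header records, and on the cleanup path it frees that spill buffer again. The rounding `size = (size + PAGE_SIZE) & PAGE_MASK` pads the spill buffer up to whole pages (over-allocating one page when the size is already aligned). The SG_ENTRY_SZ to SG_ENTRY_SZ_64BIT changes also tighten the offset validation in this 64-bit path, which apparently had been checking against the smaller 32-bit entry size.

Below is a minimal sketch of the resulting wire format outside the driver. The two struct layouts mirror the uapi additions further down; every other name here (sg_seg, build_sg_list_v2, the caller-supplied spill buffer) is hypothetical and not part of the driver.

#include <stdint.h>
#include <string.h>

/* Mirrors qseecom_sg_entry_64bit from the uapi header: 12 bytes packed. */
struct sg_entry_64 {
	uint64_t phys_addr;
	uint32_t len;
} __attribute__((packed));

/* Mirrors qseecom_sg_list_buf_hdr_64bit: 28 bytes packed. */
struct sg_list_hdr_64 {
	struct sg_entry_64 blank_entry;	/* must stay all-zero */
	uint32_t version;		/* 2 selects the out-of-line format */
	uint64_t new_buf_phys_addr;	/* PA of the spill buffer */
	uint32_t nents_total;		/* total number of SG entries */
} __attribute__((packed));

/* Hypothetical segment descriptor used only by this sketch. */
struct sg_seg {
	uint64_t addr;
	uint32_t len;
};

/*
 * Write a version-2 header at `field` (a location inside the command
 * buffer) and the full entry list into `spill_buf`, whose physical
 * address `spill_pa` the caller obtained when allocating it.
 */
static void build_sg_list_v2(void *field, void *spill_buf,
			     uint64_t spill_pa,
			     const struct sg_seg *segs, uint32_t nents)
{
	struct sg_list_hdr_64 hdr;
	struct sg_entry_64 *out = (struct sg_entry_64 *)spill_buf;
	uint32_t i;

	memset(&hdr, 0, sizeof(hdr));	/* keeps blank_entry all-zero */
	hdr.version = 2;		/* QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2 */
	hdr.new_buf_phys_addr = spill_pa;
	hdr.nents_total = nents;
	memcpy(field, &hdr, sizeof(hdr));

	for (i = 0; i < nents; i++) {
		out[i].phys_addr = segs[i].addr;
		out[i].len = segs[i].len;
	}
}

In the driver the spill buffer and its physical address come from dma_alloc_coherent(); the sketch leaves allocation to the caller so it stays self-contained.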
include/uapi/linux/qseecom.h  +18 −0

@@ -233,6 +233,24 @@ struct qseecom_sg_entry_64bit {
 	uint32_t len;
 } __attribute__ ((packed));
 
+/*
+ * sg list buf format version
+ * 1: Legacy format to support only 512 SG list entries
+ * 2: new format to support > 512 entries
+ */
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1	1
+#define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2	2
+
+struct qseecom_sg_list_buf_hdr_64bit {
+	struct qseecom_sg_entry_64bit  blank_entry;	/* must be all 0 */
+	uint32_t version;		/* sg list buf format version */
+	uint64_t new_buf_phys_addr;	/* PA of new buffer */
+	uint32_t nents_total;		/* Total number of SG entries */
+} __attribute__ ((packed));
+
+#define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT	\
+			sizeof(struct qseecom_sg_list_buf_hdr_64bit)
+
 #define MAX_CE_PIPE_PAIR_PER_UNIT 3
 #define INVALID_CE_INFO_UNIT_NUM 0xffffffff
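Because both structs are packed, the layout is fixed: a qseecom_sg_entry_64bit is 12 bytes (u64 phys_addr + u32 len) and the header is 28 bytes (12-byte blank entry + u32 version + u64 physical address + u32 count), comfortably smaller than the inline-list space the offset checks above validate against. Presumably the all-zero blank_entry is what lets the secure-side parser distinguish a version-2 header from a legacy inline list, whose first entry would carry a non-zero length. A compile-time check of those sizes, assuming a uapi <linux/qseecom.h> that already contains this patch:

#include <stddef.h>
#include <linux/qseecom.h>

_Static_assert(sizeof(struct qseecom_sg_entry_64bit) == 12,
	       "packed entry: u64 phys_addr + u32 len");
_Static_assert(offsetof(struct qseecom_sg_list_buf_hdr_64bit, version) == 12,
	       "version field immediately follows the blank entry");
_Static_assert(sizeof(struct qseecom_sg_list_buf_hdr_64bit) == 28,
	       "blank entry + version + new_buf_phys_addr + nents_total");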