drivers/gpu/msm/kgsl_sharedmem.c  +29 −28

@@ -624,6 +624,9 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 		uint64_t size, unsigned int op)
 {
 	void *addr = NULL;
+	struct sg_table *sgt = NULL;
+	struct scatterlist *sg;
+	unsigned int i, pos = 0;
 	int ret = 0;
 
 	if (size == 0 || size > UINT_MAX)
@@ -651,11 +654,18 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 	 * If the buffer is not to mapped to kernel, perform cache
 	 * operations after mapping to kernel.
 	 */
-	if (memdesc->sgt != NULL) {
-		struct scatterlist *sg;
-		unsigned int i, pos = 0;
+	if (memdesc->sgt != NULL)
+		sgt = memdesc->sgt;
+	else {
+		if (memdesc->pages == NULL)
+			return ret;
+
+		sgt = kgsl_alloc_sgt_from_pages(memdesc);
+		if (IS_ERR(sgt))
+			return PTR_ERR(sgt);
+	}
 
-		for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 		uint64_t sg_offset, sg_left;
 
 		if (offset >= (pos + sg->length)) {
@@ -672,19 +682,10 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
 			break;
 		pos += sg->length;
 	}
-	} else if (memdesc->pages != NULL) {
-		addr = vmap(memdesc->pages, memdesc->page_count, VM_IOREMAP,
-			pgprot_writecombine(PAGE_KERNEL));
-		if (addr == NULL)
-			return -ENOMEM;
-
-		/* Make sure the offset + size do not overflow the address */
-		if (addr + ((size_t) offset + (size_t) size) < addr)
-			return -ERANGE;
-
-		ret = kgsl_do_cache_op(NULL, addr, offset, size, op);
-		vunmap(addr);
-	}
+
+	if (memdesc->sgt == NULL)
+		kgsl_free_sgt(sgt);
+
 	return ret;
 }
 EXPORT_SYMBOL(kgsl_cache_range_op);
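The change drops the vmap()-based fallback for memdesc->pages: instead of mapping the whole page array into kernel virtual address space just to run cache maintenance, kgsl_cache_range_op() now always walks a scatter-gather table segment by segment. When the memdesc has no sg_table of its own, one is built from its page array with kgsl_alloc_sgt_from_pages() and released with kgsl_free_sgt() once the loop finishes. Neither helper appears in this diff; the sketch below is a hypothetical illustration (the example_* names are not the real KGSL functions), assuming they are thin wrappers around the standard kernel scatterlist API.

/*
 * Minimal sketch of an sg_table-from-pages helper, analogous to what
 * kgsl_alloc_sgt_from_pages() is assumed to do. Not the actual KGSL code.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *example_alloc_sgt_from_pages(struct page **pages,
						     unsigned int page_count,
						     size_t size)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return ERR_PTR(-ENOMEM);

	/* Chain the pages into scatterlist entries; contiguous pages coalesce. */
	ret = sg_alloc_table_from_pages(sgt, pages, page_count, 0, size,
					GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

/* The matching free, mirroring the kgsl_free_sgt() call in the diff. */
static void example_free_sgt(struct sg_table *sgt)
{
	sg_free_table(sgt);
	kfree(sgt);
}

With the table in hand, the patched loop performs the cache operation page by page via kgsl_do_cache_op(sg_page(sg), ...), touching only the segments covered by offset and size, so no temporary kernel mapping of the full buffer is needed.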