drivers/dma-buf/Kconfig  +11 −0

@@ -30,6 +30,17 @@ config SW_SYNC
 	  WARNING: improper use of this can result in deadlocking kernel
 	  drivers from userspace. Intended for test and debug only.
 
+config DEBUG_DMA_BUF_REF
+	bool "DEBUG Reference Count"
+	depends on STACKDEPOT
+	depends on DMA_SHARED_BUFFER
+	default n
+	help
+	  Save stack traces for every call to dma_buf_get and dma_buf_put, to
+	  help debug memory leaks. Potential leaks may be found by manually
+	  matching the get/put call stacks. This feature consumes extra memory
+	  in order to save the stack traces using STACKDEPOT.
+
 config UDMABUF
 	bool "userspace dmabuf misc driver"
 	default n
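For the tracker to build, the option and its dependencies have to end up in the final kernel config. A minimal fragment might look like the following; note that STACKDEPOT carries no prompt of its own in mainline, so it is normally pulled in by another debug option rather than set by hand:

	CONFIG_DMA_SHARED_BUFFER=y
	CONFIG_STACKDEPOT=y
	CONFIG_DEBUG_DMA_BUF_REF=y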
drivers/dma-buf/Makefile  +1 −0

@@ -3,4 +3,5 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
 	 reservation.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)		+= sync_file.o
 obj-$(CONFIG_SW_SYNC)		+= sw_sync.o sync_debug.o
+obj-$(CONFIG_DEBUG_DMA_BUF_REF)	+= dma-buf-ref.o
 obj-$(CONFIG_UDMABUF)		+= udmabuf.o
drivers/dma-buf/dma-buf-ref.c  (new file, mode 100644)  +114 −0

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/seq_file.h>

#define DMA_BUF_STACK_DEPTH (16)

/*
 * One node per unique get/put call stack; count is the net reference
 * balance contributed by that stack.
 */
struct dma_buf_ref {
	struct list_head list;
	depot_stack_handle_t handle;
	int count;
};

void dma_buf_ref_init(struct msm_dma_buf *msm_dma_buf)
{
	INIT_LIST_HEAD(&msm_dma_buf->refs);
}

void dma_buf_ref_destroy(struct msm_dma_buf *msm_dma_buf)
{
	struct dma_buf_ref *r, *n;
	struct dma_buf *dmabuf = &msm_dma_buf->dma_buf;

	mutex_lock(&dmabuf->lock);
	list_for_each_entry_safe(r, n, &msm_dma_buf->refs, list) {
		list_del(&r->list);
		kfree(r);
	}
	mutex_unlock(&dmabuf->lock);
}

static void dma_buf_ref_insert_handle(struct msm_dma_buf *msm_dma_buf,
				      depot_stack_handle_t handle,
				      int count)
{
	struct dma_buf_ref *r;
	struct dma_buf *dmabuf = &msm_dma_buf->dma_buf;

	mutex_lock(&dmabuf->lock);
	/* Coalesce repeated gets/puts from the same call site. */
	list_for_each_entry(r, &msm_dma_buf->refs, list) {
		if (r->handle == handle) {
			r->count += count;
			goto out;
		}
	}

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		goto out;

	INIT_LIST_HEAD(&r->list);
	r->handle = handle;
	r->count = count;
	list_add(&r->list, &msm_dma_buf->refs);
out:
	mutex_unlock(&dmabuf->lock);
}

void dma_buf_ref_mod(struct msm_dma_buf *msm_dma_buf, int nr)
{
	unsigned long entries[DMA_BUF_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = DMA_BUF_STACK_DEPTH,
		.skip = 1
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	/* save_stack_trace() may terminate the trace with ULONG_MAX. */
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
		trace.nr_entries--;

	handle = depot_save_stack(&trace, GFP_KERNEL);
	if (!handle)
		return;

	dma_buf_ref_insert_handle(msm_dma_buf, handle, nr);
}

/*
 * Called with dmabuf->lock held.
 */
int dma_buf_ref_show(struct seq_file *s, struct msm_dma_buf *msm_dma_buf)
{
	char *buf;
	struct dma_buf_ref *ref;
	int count = 0;
	struct stack_trace trace;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	list_for_each_entry(ref, &msm_dma_buf->refs, list) {
		count += ref->count;

		seq_printf(s, "References: %d\n", ref->count);
		depot_fetch_stack(ref->handle, &trace);
		snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
		seq_puts(s, buf);
		seq_putc(s, '\n');
	}

	seq_printf(s, "Total references: %d\n\n\n", count);

	free_page((unsigned long)buf);

	return 0;
}
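For orientation, dma_buf_ref_show() above prints one "References:" block per unique call stack, followed by a grand total. A single tracked buffer might render roughly like this in the debugfs dump (a sketch reconstructed from the seq_printf() format strings; the stack frames are illustrative placeholders, not a real capture):

	References: 2
	 dma_buf_get+0x48/0xd0
	 example_driver_import+0x34/0x90

	References: -1
	 dma_buf_put+0x28/0x70
	 example_driver_release+0x1c/0x40

	Total references: 1

A stack whose positive count never gains a matching negative partner is the natural starting point when hunting a leak.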
drivers/dma-buf/dma-buf.c  +19 −7

@@ -79,12 +79,14 @@ static struct file_system_type dma_buf_fs_type = {
 
 static int dma_buf_release(struct inode *inode, struct file *file)
 {
+	struct msm_dma_buf *msm_dma_buf;
 	struct dma_buf *dmabuf;
 
 	if (!is_dma_buf_file(file))
 		return -EINVAL;
 
 	dmabuf = file->private_data;
+	msm_dma_buf = to_msm_dma_buf(dmabuf);
 
 	BUG_ON(dmabuf->vmapping_counter);
@@ -98,17 +100,18 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	 */
 	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
 
-	dmabuf->ops->release(dmabuf);
-
 	mutex_lock(&db_list.lock);
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
+	dmabuf->ops->release(dmabuf);
+	dma_buf_ref_destroy(msm_dma_buf);
+
 	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
 		reservation_object_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
-	kfree(dmabuf);
+	kfree(msm_dma_buf);
 	return 0;
 }
@@ -505,10 +508,11 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
  */
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
+	struct msm_dma_buf *msm_dma_buf;
 	struct dma_buf *dmabuf;
 	struct reservation_object *resv = exp_info->resv;
 	struct file *file;
-	size_t alloc_size = sizeof(struct dma_buf);
+	size_t alloc_size = sizeof(struct msm_dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
@@ -528,12 +532,13 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	if (!try_module_get(exp_info->owner))
 		return ERR_PTR(-ENOENT);
 
-	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
-	if (!dmabuf) {
+	msm_dma_buf = kzalloc(alloc_size, GFP_KERNEL);
+	if (!msm_dma_buf) {
 		ret = -ENOMEM;
 		goto err_module;
 	}
 
+	dmabuf = &msm_dma_buf->dma_buf;
 	dmabuf->priv = exp_info->priv;
 	dmabuf->ops = exp_info->ops;
 	dmabuf->size = exp_info->size;
@@ -561,6 +566,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	mutex_init(&dmabuf->lock);
 	INIT_LIST_HEAD(&dmabuf->attachments);
 
+	dma_buf_ref_init(msm_dma_buf);
+	dma_buf_ref_mod(msm_dma_buf, 1);
+
 	mutex_lock(&db_list.lock);
 	list_add(&dmabuf->list_node, &db_list.head);
 	mutex_unlock(&db_list.lock);
@@ -568,7 +576,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	return dmabuf;
 
 err_dmabuf:
-	kfree(dmabuf);
+	kfree(msm_dma_buf);
 err_module:
 	module_put(exp_info->owner);
 	return ERR_PTR(ret);
@@ -620,6 +628,7 @@ struct dma_buf *dma_buf_get(int fd)
 		fput(file);
 		return ERR_PTR(-EINVAL);
 	}
+	dma_buf_ref_mod(to_msm_dma_buf(file->private_data), 1);
 
 	return file->private_data;
 }
@@ -640,6 +649,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
 	if (WARN_ON(!dmabuf || !dmabuf->file))
 		return;
 
+	dma_buf_ref_mod(to_msm_dma_buf(dmabuf), -1);
 	fput(dmabuf->file);
 }
 EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1281,6 +1291,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	seq_printf(s, "Total %d devices attached\n\n",
 		   attach_count);
 
+	dma_buf_ref_show(s, to_msm_dma_buf(buf_obj));
+
 	count++;
 	size += buf_obj->size;
 	mutex_unlock(&buf_obj->lock);
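To see what the two new hooks in dma_buf_get()/dma_buf_put() observe, consider a minimal sketch of an importing driver; the function and variable names are illustrative, and only the dma_buf_get()/dma_buf_put() calls come from the patch above:

	#include <linux/dma-buf.h>
	#include <linux/err.h>

	/*
	 * Hypothetical import path: each call below lands in
	 * dma_buf_ref_mod() with +1 or -1 keyed to the caller's stack.
	 */
	static int example_import_and_release(int fd)
	{
		struct dma_buf *dmabuf;

		dmabuf = dma_buf_get(fd);	/* records +1 against this call stack */
		if (IS_ERR(dmabuf))
			return PTR_ERR(dmabuf);

		/* ... attach, map, use the buffer ... */

		dma_buf_put(dmabuf);		/* records -1; dropping this line is
						 * exactly the kind of leak the
						 * debugfs dump exposes */
		return 0;
	}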
drivers/staging/android/ion/Kconfig  +55 −0

@@ -34,3 +34,58 @@ config ION_CMA_HEAP
 	  Choose this option to enable CMA heaps with Ion. This heap is backed
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
+
+config ION_MSM_HEAPS
+	tristate "MSM platform-based Ion heaps support"
+	depends on ION && DMA_CMA && QCOM_SECURE_BUFFER
+	help
+	  Enable this option to support platform-based Ion heaps. The heaps
+	  will register with the Ion core framework, at which point userspace
+	  clients can allocate different types of memory (e.g. secure, cached,
+	  and uncached) from the different types of heaps. The MSM heaps allow
+	  Ion buffers to be shared through the shared DMA buffer framework,
+	  and the heaps implement their own cache maintenance operations.
+	  If you're not sure, say Y here.
+
+config ION_FORCE_DMA_SYNC
+	bool "Force ION to always DMA sync buffer memory"
+	depends on ION
+	help
+	  Force ION to DMA sync buffer memory when it is allocated and to
+	  always DMA sync the buffer memory on calls to begin/end cpu access.
+	  This makes ION DMA sync behavior similar to that of the older
+	  version of ION. We generally don't want to enable this config as it
+	  breaks the cache maintenance model. If you're not sure, say N here.
+
+config ION_DEFER_FREE_NO_SCHED_IDLE
+	bool "Increase the priority of the ION defer-free thread"
+	depends on ION
+	help
+	  Certain heaps, such as the system heaps, use a low-priority thread
+	  to free buffer allocations back to the pool, which can cause future
+	  allocation requests to fall through to the buddy allocator instead
+	  of the pool when the buffer allocation rate is high. Choose this
+	  option to remove the SCHED_IDLE flag from the defer-free thread,
+	  thereby increasing its priority. If you're not sure, say Y here.
+
+config ION_POOL_AUTO_REFILL
+	bool "Refill the ION heap pools automatically"
+	depends on ION
+	help
+	  Choose this option to refill the ION system heap pools (non-secure)
+	  automatically when the pool page count drops below a set low mark.
+	  The refilling is done by a worker thread that is invoked
+	  asynchronously when the pool count falls below the low mark.
+	  If you're not sure, say Y here.
+
+config ION_POOL_FILL_MARK
+	int "ION pool fill-mark size in MB"
+	depends on ION_POOL_AUTO_REFILL
+	range 16 256
+	default 100
+	help
+	  Set the fill mark of the pool in megabytes; the low mark is
+	  ION_POOL_LOW_MARK_PERCENT of the fill-mark value.
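As a sanity check on the fill-mark/low-mark relationship described above, here is a small userspace-style sketch of the arithmetic. ION_POOL_LOW_MARK_PERCENT is not part of this diff, so the value 25 below is an assumed illustrative stand-in:

	#include <stdio.h>

	#define SZ_1M			(1L << 20)
	#define PAGE_SIZE		4096L
	#define POOL_FILL_MARK_MB	100L	/* CONFIG_ION_POOL_FILL_MARK default */
	#define LOW_MARK_PERCENT	25L	/* assumption: stands in for
						 * ION_POOL_LOW_MARK_PERCENT */

	int main(void)
	{
		long fill_mark_pages = POOL_FILL_MARK_MB * SZ_1M / PAGE_SIZE;
		long low_mark_pages = fill_mark_pages * LOW_MARK_PERCENT / 100;

		/* With the defaults above: 25600 fill-mark pages and 6400
		 * low-mark pages; refill kicks in below the low mark and
		 * tops the pool back up toward the fill mark.
		 */
		printf("fill mark: %ld pages, low mark: %ld pages\n",
		       fill_mark_pages, low_mark_pages);
		return 0;
	}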