drivers/gpu/msm/kgsl.c +64 −32

@@ -235,6 +235,7 @@ static struct kgsl_mem_entry *kgsl_mem_entry_create(void)
 		kref_get(&entry->refcount);
 	}
 
+	atomic_set(&entry->map_count, 0);
 	return entry;
 }
@@ -502,9 +503,6 @@ static void kgsl_mem_entry_detach_process(struct kgsl_mem_entry *entry)
 	idr_remove(&entry->priv->mem_idr, entry->id);
 	entry->id = 0;
 
-	atomic_long_sub(atomic_long_read(&entry->memdesc.mapsize),
-		&entry->priv->gpumem_mapped);
-
 	spin_unlock(&entry->priv->mem_lock);
 
 	kgsl_mmu_put_gpuaddr(&entry->memdesc);
@@ -2448,7 +2446,7 @@ static int check_vma(unsigned long hostptr, u64 size)
 	return true;
 }
 
-static int memdesc_sg_virt(struct kgsl_memdesc *memdesc)
+static int memdesc_sg_virt(struct kgsl_memdesc *memdesc, unsigned long useraddr)
 {
 	int ret = 0;
 	long npages = 0, i;
@@ -2471,13 +2469,13 @@
 	}
 
 	down_read(&current->mm->mmap_sem);
-	if (!check_vma(memdesc->useraddr, memdesc->size)) {
+	if (!check_vma(useraddr, memdesc->size)) {
 		up_read(&current->mm->mmap_sem);
 		ret = -EFAULT;
 		goto out;
 	}
 
-	npages = get_user_pages(memdesc->useraddr, sglen, write, pages, NULL);
+	npages = get_user_pages(useraddr, sglen, write, pages, NULL);
 	up_read(&current->mm->mmap_sem);
 
 	ret = (npages < 0) ? (int)npages : 0;
@@ -2515,7 +2513,6 @@ static int kgsl_setup_anon_useraddr(struct kgsl_pagetable *pagetable,
 	entry->memdesc.pagetable = pagetable;
 	entry->memdesc.size = (uint64_t) size;
-	entry->memdesc.useraddr = hostptr;
 	entry->memdesc.flags |= (uint64_t)KGSL_MEMFLAGS_USERMEM_ADDR;
 
 	if (kgsl_memdesc_use_cpu_map(&entry->memdesc)) {
@@ -2523,15 +2520,15 @@
 		/* Register the address in the database */
 		ret = kgsl_mmu_set_svm_region(pagetable,
-			(uint64_t) entry->memdesc.useraddr, (uint64_t) size);
+			(uint64_t) hostptr, (uint64_t) size);
 
 		if (ret)
 			return ret;
 
-		entry->memdesc.gpuaddr = (uint64_t) entry->memdesc.useraddr;
+		entry->memdesc.gpuaddr = (uint64_t) hostptr;
 	}
 
-	return memdesc_sg_virt(&entry->memdesc);
+	return memdesc_sg_virt(&entry->memdesc, hostptr);
 }
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
@@ -2616,8 +2613,7 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
 		return ret;
 	}
 
-	/* Setup the user addr/cache mode for cache operations */
-	entry->memdesc.useraddr = hostptr;
+	/* Setup the cache mode for cache operations */
 	_setup_cache_mode(entry, vma);
 
 	if (MMU_FEATURE(&device->mmu, KGSL_MMU_IO_COHERENT))
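Taken together, the memdesc_sg_virt() and setup-path hunks above make the user address a call-scoped parameter instead of long-lived state on the shared kgsl_memdesc, so no other thread can observe or clobber a per-call address through the descriptor. A minimal userspace sketch of that shape; struct desc and pin_pages are hypothetical stand-ins for the driver types, not KGSL API:

#include <stdio.h>
#include <stdint.h>

struct desc {                        /* stand-in for kgsl_memdesc */
	uint64_t size;
	/* no 'useraddr' field: per-call state doesn't belong here */
};

/*
 * Before the patch the helper read shared mutable state from the
 * descriptor; after it, the address is an explicit parameter whose
 * lifetime is exactly this call.
 */
static int pin_pages(struct desc *d, unsigned long useraddr)
{
	if (useraddr == 0 || d->size == 0)
		return -1;               /* models the -EFAULT path */
	printf("pinning %llu bytes at 0x%lx\n",
	       (unsigned long long)d->size, useraddr);
	return 0;
}

int main(void)
{
	struct desc d = { .size = 4096 };
	return pin_pages(&d, 0x7f0000000000UL);
}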
@@ -3675,7 +3671,12 @@ long kgsl_ioctl_gpumem_get_info(struct kgsl_device_private *dev_priv,
 	param->flags = (unsigned int) entry->memdesc.flags;
 	param->size = (size_t) entry->memdesc.size;
 	param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
-	param->useraddr = entry->memdesc.useraddr;
+	/*
+	 * Entries can have multiple user mappings so there isn't any one address
+	 * we can report. Plus, the user should already know their mappings, so
+	 * there isn't any value in reporting it back to them.
+	 */
+	param->useraddr = 0;
 
 	kgsl_mem_entry_put(entry);
 	return result;
@@ -4148,9 +4149,6 @@ static int _sparse_bind(struct kgsl_process_private *process,
 	if (memdesc->gpuaddr)
 		return -EINVAL;
 
-	if (memdesc->useraddr != 0)
-		return -EINVAL;
-
 	pagetable = memdesc->pagetable;
 
 	/* Clear out any mappings */
@@ -4431,7 +4429,12 @@ long kgsl_ioctl_gpuobj_info(struct kgsl_device_private *dev_priv,
 	param->flags = entry->memdesc.flags;
 	param->size = entry->memdesc.size;
 	param->va_len = kgsl_memdesc_footprint(&entry->memdesc);
-	param->va_addr = (uint64_t) entry->memdesc.useraddr;
+	/*
+	 * Entries can have multiple user mappings so there isn't any one address
+	 * we can report. Plus, the user should already know their mappings, so
+	 * there isn't any value in reporting it back to them.
+	 */
+	param->va_addr = 0;
 
 	kgsl_mem_entry_put(entry);
 	return 0;
@@ -4539,24 +4542,21 @@ static void kgsl_gpumem_vm_open(struct vm_area_struct *vma)
 	if (kgsl_mem_entry_get(entry) == 0)
 		vma->vm_private_data = NULL;
+
+	atomic_inc(&entry->map_count);
 }
 
 static int
 kgsl_gpumem_vm_fault(struct vm_fault *vmf)
 {
 	struct kgsl_mem_entry *entry = vmf->vma->vm_private_data;
-	int ret;
 
 	if (!entry)
 		return VM_FAULT_SIGBUS;
 	if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
 		return VM_FAULT_SIGBUS;
 
-	ret = entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
-	if ((ret == 0) || (ret == VM_FAULT_NOPAGE))
-		atomic_long_add(PAGE_SIZE, &entry->priv->gpumem_mapped);
-
-	return ret;
+	return entry->memdesc.ops->vmfault(&entry->memdesc, vmf->vma, vmf);
 }
 
 static void
@@ -4567,7 +4567,14 @@ kgsl_gpumem_vm_close(struct vm_area_struct *vma)
 	if (!entry)
 		return;
 
-	entry->memdesc.useraddr = 0;
+	/*
+	 * Remove the memdesc from the mapped stat once all the mappings have
+	 * gone away
+	 */
+	if (!atomic_dec_return(&entry->map_count))
+		atomic_long_sub(entry->memdesc.size,
+			&entry->priv->gpumem_mapped);
+
 	kgsl_mem_entry_put_deferred(entry);
 }
@@ -4606,7 +4613,8 @@ get_mmap_entry(struct kgsl_process_private *private,
 		}
 	}
 
-	if (entry->memdesc.useraddr != 0) {
+	/* Don't allow ourselves to remap user memory */
+	if (entry->memdesc.flags & KGSL_MEMFLAGS_USERMEM_ADDR) {
 		ret = -EBUSY;
 		goto err_put;
 	}
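The vm_open()/vm_close() hunks above replace per-page and per-descriptor bookkeeping with a vma reference count: the process-wide gpumem_mapped stat is charged once when an entry gains its first mapping and released when the last mapping closes. Below is a runnable userspace model of that transition-based accounting using C11 atomics; it is simplified (in the driver the first increment actually happens in kgsl_mmap(), with vm_open() covering vma duplication) and all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int map_count;       /* vmas currently mapping the entry */
static atomic_long gpumem_mapped;  /* process-wide stat, in bytes */

static void vm_open_model(long size)
{
	/* charge the stat only on the 0 -> 1 transition */
	if (atomic_fetch_add(&map_count, 1) == 0)
		atomic_fetch_add(&gpumem_mapped, size);
}

static void vm_close_model(long size)
{
	/* release the stat only on the 1 -> 0 transition */
	if (atomic_fetch_sub(&map_count, 1) == 1)
		atomic_fetch_sub(&gpumem_mapped, size);
}

int main(void)
{
	vm_open_model(4096);   /* first mapping: stat += 4096 */
	vm_open_model(4096);   /* second mapping: stat unchanged */
	vm_close_model(4096);  /* one mapping left: stat unchanged */
	vm_close_model(4096);  /* last mapping gone: stat -= 4096 */
	printf("mapped: %ld\n", atomic_load(&gpumem_mapped)); /* prints 0 */
	return 0;
}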
@@ -4639,19 +4647,34 @@ static unsigned long _gpu_set_svm_region(struct kgsl_process_private *private,
 {
 	int ret;
 
+	/*
+	 * Protect access to the gpuaddr here to prevent multiple vmas from
+	 * trying to map a SVM region at the same time
+	 */
+	spin_lock(&entry->memdesc.gpuaddr_lock);
+
+	if (entry->memdesc.gpuaddr) {
+		spin_unlock(&entry->memdesc.gpuaddr_lock);
+		return (unsigned long) -EBUSY;
+	}
+
 	ret = kgsl_mmu_set_svm_region(private->pagetable, (uint64_t) addr,
 		(uint64_t) size);
-	if (ret != 0)
-		return ret;
+	if (ret != 0) {
+		spin_unlock(&entry->memdesc.gpuaddr_lock);
+		return (unsigned long) ret;
+	}
 
 	entry->memdesc.gpuaddr = (uint64_t) addr;
+	spin_unlock(&entry->memdesc.gpuaddr_lock);
+
 	entry->memdesc.pagetable = private->pagetable;
 
 	ret = kgsl_mmu_map(private->pagetable, &entry->memdesc);
 	if (ret) {
 		kgsl_mmu_put_gpuaddr(&entry->memdesc);
-		return ret;
+		return (unsigned long) ret;
 	}
 
 	kgsl_memfree_purge(private->pagetable, entry->memdesc.gpuaddr,
@@ -4715,6 +4738,14 @@ static unsigned long _search_range(struct kgsl_process_private *private,
 		result = _gpu_set_svm_region(private, entry, cpu, len);
 		if (!IS_ERR_VALUE(result))
 			break;
 
+		/*
+		 * _gpu_set_svm_region will return -EBUSY if we tried to set up
+		 * SVM on an object that already has a GPU address. If that
+		 * happens don't bother walking the rest of the region
+		 */
+		if ((long) result == -EBUSY)
+			return -EBUSY;
+
 		trace_kgsl_mem_unmapped_area_collision(entry, cpu, len);
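_gpu_set_svm_region() now folds the "already has a GPU address?" check and the claim itself into one critical section under the new gpuaddr_lock, so two vmas racing to map the same SVM entry cannot both succeed; the loser gets -EBUSY and _search_range() stops walking the region. A self-contained model of that claim-once pattern using a pthread spinlock; the names are illustrative, not the driver's API:

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct memdesc_model {
	uint64_t gpuaddr;               /* 0 means unclaimed */
	pthread_spinlock_t gpuaddr_lock;
};

/* Claim 'addr' for this descriptor; only the first caller wins. */
static int set_svm_region_model(struct memdesc_model *md, uint64_t addr)
{
	pthread_spin_lock(&md->gpuaddr_lock);
	if (md->gpuaddr) {
		/* somebody else already mapped this entry */
		pthread_spin_unlock(&md->gpuaddr_lock);
		return -EBUSY;
	}
	md->gpuaddr = addr;             /* claim is visible before unlock */
	pthread_spin_unlock(&md->gpuaddr_lock);
	return 0;
}

int main(void)
{
	struct memdesc_model md = { 0 };

	pthread_spin_init(&md.gpuaddr_lock, PTHREAD_PROCESS_PRIVATE);
	printf("first:  %d\n", set_svm_region_model(&md, 0x5000)); /* 0 */
	printf("second: %d\n", set_svm_region_model(&md, 0x6000)); /* -EBUSY */
	pthread_spin_destroy(&md.gpuaddr_lock);
	return 0;
}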
@@ -4936,14 +4967,11 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
 			vm_insert_page(vma, addr, page);
 			addr += PAGE_SIZE;
 		}
-		atomic_long_add(m->size, &m->mapsize);
-		atomic_long_add(m->size, &entry->priv->gpumem_mapped);
 	}
 
 	vma->vm_file = file;
 
 	entry->memdesc.vma = vma;
-	entry->memdesc.useraddr = vma->vm_start;
 
 	/*
 	 * kgsl gets the entry id or the gpu address through vm_pgoff.
@@ -4954,7 +4982,11 @@
 	 */
 	vma->vm_pgoff = 0;
 
-	trace_kgsl_mem_mmap(entry);
+	if (atomic_inc_return(&entry->map_count) == 1)
+		atomic_long_add(entry->memdesc.size,
+			&entry->priv->gpumem_mapped);
+
+	trace_kgsl_mem_mmap(entry, vma->vm_start);
 
 	return 0;
 }

drivers/gpu/msm/kgsl.h +10 −4

@@ -196,11 +196,9 @@ struct kgsl_memdesc_ops {
  * @pagetable: Pointer to the pagetable that the object is mapped in
  * @hostptr: Kernel virtual address
  * @hostptr_count: Number of threads using hostptr
- * @useraddr: User virtual address (if applicable)
  * @gpuaddr: GPU virtual address
  * @physaddr: Physical address of the memory object
  * @size: Size of the memory object
- * @mapsize: Size of memory mapped in userspace
  * @priv: Internal flags and settings
  * @sgt: Scatter gather table for allocated pages
  * @ops: Function hooks for the memdesc memory type
@@ -216,11 +214,9 @@ struct kgsl_memdesc {
 	struct kgsl_pagetable *pagetable;
 	void *hostptr;
 	unsigned int hostptr_count;
-	unsigned long useraddr;
 	uint64_t gpuaddr;
 	phys_addr_t physaddr;
 	uint64_t size;
-	atomic_long_t mapsize;
 	unsigned int priv;
 	struct sg_table *sgt;
 	struct kgsl_memdesc_ops *ops;
@@ -243,6 +239,11 @@
 	 * @reclaimed_page_count: Total number of pages reclaimed
 	 */
 	int reclaimed_page_count;
+	/*
+	 * @gpuaddr_lock: Spinlock to protect the gpuaddr from being accessed by
+	 * multiple entities trying to map the same SVM region at once
+	 */
+	spinlock_t gpuaddr_lock;
 };
 
 /*
@@ -291,6 +292,11 @@ struct kgsl_mem_entry {
 	struct work_struct work;
 	spinlock_t bind_lock;
 	struct rb_root bind_tree;
+	/**
+	 * @map_count: Count how many vmas this object is mapped in - used for
+	 * debugfs accounting
+	 */
+	atomic_t map_count;
 };
 
 struct kgsl_device_private;
drivers/gpu/msm/kgsl_debugfs.c +13 −6

@@ -147,7 +147,11 @@ static int print_mem_entry(void *data, void *ptr)
 	flags[3] = get_alignflag(m);
 	flags[4] = get_cacheflag(m);
 	flags[5] = kgsl_memdesc_use_cpu_map(m) ? 'p' : '-';
-	flags[6] = (m->useraddr) ? 'Y' : 'N';
+	/*
+	 * Show Y if at least one vma has this entry
+	 * mapped (could be multiple)
+	 */
+	flags[6] = atomic_read(&entry->map_count) ? 'Y' : 'N';
 	flags[7] = kgsl_memdesc_is_secured(m) ? 's' : '-';
 	flags[8] = m->flags & KGSL_MEMFLAGS_SPARSE_PHYS ? 'P' : '-';
 	flags[9] = '\0';
@@ -158,13 +162,16 @@
 	kgsl_get_egl_counts(entry, &egl_surface_count,
 		&egl_image_count);
 
-	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16ld %6d %6d",
+	seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16d %6d %6d",
 		(uint64_t *)(uintptr_t) m->gpuaddr,
-		(unsigned long *) m->useraddr,
-		m->size, entry->id, flags,
+		/*
+		 * Show zero for the useraddr - we can't reliably track
+		 * that value for multiple vmas anyway
+		 */
+		0, m->size, entry->id, flags,
 		memtype_str(usermem_type),
 		usage, (m->sgt ? m->sgt->nents : 0),
-		atomic_long_read(&m->mapsize),
+		atomic_read(&entry->map_count),
 		egl_surface_count, egl_image_count);
 
 	if (entry->metadata[0] != 0)
@@ -235,7 +242,7 @@ static int process_mem_seq_show(struct seq_file *s, void *ptr)
 	if (ptr == SEQ_START_TOKEN) {
 		seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s %6s %6s\n",
 			"gpuaddr", "useraddr", "size", "id", "flags", "type",
			"usage", "sglen", "mapcount", "eglsrf", "eglimg");
 		return 0;
 	} else
 		return print_mem_entry(s, ptr);

drivers/gpu/msm/kgsl_iommu.c +5 −0

@@ -2489,6 +2489,11 @@ static int get_gpuaddr(struct kgsl_pagetable *pagetable,
 		return -ENOMEM;
 	}
 
+	/*
+	 * This path is only called in a non-SVM path with locks held, so we
+	 * can be sure we aren't racing with anybody and don't need to worry
+	 * about taking the gpuaddr lock
+	 */
 	ret = _insert_gpuaddr(pagetable, addr, size);
 
 	spin_unlock(&pagetable->lock);

drivers/gpu/msm/kgsl_mmu.c +8 −1

@@ -436,10 +436,17 @@ void kgsl_mmu_put_gpuaddr(struct kgsl_memdesc *memdesc)
 	if (PT_OP_VALID(pagetable, put_gpuaddr) && (unmap_fail == 0))
 		pagetable->pt_ops->put_gpuaddr(memdesc);
 
-	memdesc->pagetable = NULL;
-
+	/*
+	 * If SVM tries to take a GPU address it will lose the race until the
+	 * gpuaddr returns to zero so we shouldn't need to worry about taking a
+	 * lock here
+	 */
 	if (!kgsl_memdesc_is_global(memdesc))
 		memdesc->gpuaddr = 0;
 
+	memdesc->pagetable = NULL;
 }
 EXPORT_SYMBOL(kgsl_mmu_put_gpuaddr);
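The kgsl_mmu_put_gpuaddr() hunk relies on ordering rather than the lock: the SVM claim path treats any nonzero gpuaddr as busy, so a concurrent claimer keeps losing right up until the release stores zero as its final step. Below is a compact userspace model of that release/claim interplay; it uses a C11 compare-and-swap where the driver uses the spinlocked check shown earlier, and all names are illustrative:

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t gpuaddr;   /* nonzero while the entry is mapped */

/* SVM claim: only succeeds when the address slot is free (zero). */
static int claim(uint64_t addr)
{
	uint64_t expected = 0;

	if (!atomic_compare_exchange_strong(&gpuaddr, &expected, addr))
		return -EBUSY;          /* racers lose until the release below */
	return 0;
}

/*
 * Release: zeroing the slot is the very last step, mirroring
 * kgsl_mmu_put_gpuaddr(), so claimers cannot win early.
 */
static void release(void)
{
	atomic_store(&gpuaddr, 0);
}

int main(void)
{
	printf("claim A: %d\n", claim(0x5000)); /* 0: wins the slot */
	printf("claim B: %d\n", claim(0x6000)); /* -EBUSY: slot taken */
	release();
	printf("claim C: %d\n", claim(0x7000)); /* 0: slot free again */
	return 0;
}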