drivers/gpu/msm/kgsl.c (+3 −22)

@@ -64,13 +64,6 @@
 #define KGSL_DMA_BIT_MASK	DMA_BIT_MASK(32)
 #endif
 
-/*
- * To accommodate legacy GPU address mmapping we need to make sure that the GPU
- * object won't conflict with the address space so define the IDs to start
- * at the top of the user address space region
- */
-#define KGSL_GPUOBJ_ID_MIN (KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT)
-
 /*
  * Define an kmem cache for the memobj structures since we allocate and free
  * them so frequently
@@ -417,8 +410,7 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
 		return -EBADF;
 
 	idr_preload(GFP_KERNEL);
 	spin_lock(&process->mem_lock);
-	id = idr_alloc(&process->mem_idr, entry, KGSL_GPUOBJ_ID_MIN, 0,
-		GFP_NOWAIT);
+	id = idr_alloc(&process->mem_idr, entry, 1, 0, GFP_NOWAIT);
 	spin_unlock(&process->mem_lock);
 	idr_preload_end();
@@ -3773,13 +3765,8 @@ get_mmap_entry(struct kgsl_process_private *private,
 	int ret = 0;
 	struct kgsl_mem_entry *entry;
 
-	/*
-	 * GPU object IDs start at KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT. Anything
-	 * less is legacy GPU memory being mapped by address
-	 */
-	if (pgoff >= KGSL_GPUOBJ_ID_MIN)
-		entry = kgsl_sharedmem_find_id(private, pgoff);
-	else
+	entry = kgsl_sharedmem_find_id(private, pgoff);
+	if (entry == NULL)
 		entry = kgsl_sharedmem_find(private, pgoff << PAGE_SHIFT);
 
 	if (!entry)
@@ -3792,12 +3779,6 @@
 		goto err_put;
 	}
 
-	/* External memory cannot be mapped */
-	if ((KGSL_MEMFLAGS_USERMEM_MASK & entry->memdesc.flags) != 0) {
-		ret = -EINVAL;
-		goto err_put;
-	}
-
 	if (entry->memdesc.useraddr != 0) {
 		ret = -EBUSY;
 		goto err_put;
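The patch drops KGSL_GPUOBJ_ID_MIN, so GPU object IDs handed out by idr_alloc() now start at 1 (with end = 0 meaning no upper bound) instead of at KGSL_SVM_UPPER_BOUND >> PAGE_SHIFT. Since an mmap page offset can no longer be distinguished from a legacy GPU address by its magnitude alone, get_mmap_entry() now tries the ID lookup first and only falls back to a by-address lookup when no object owns that ID. Below is a minimal userspace model of that new lookup order; it is a sketch for illustration only, not driver code, and the entry table, helper names, and constants are all made up.

/*
 * Simplified model of the post-patch lookup order in get_mmap_entry():
 * treat pgoff as a GPU object ID first, and only if no object has that
 * ID, re-interpret it as a GPU address (pgoff << PAGE_SHIFT).
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define MODEL_PAGE_SHIFT 12

struct model_entry {
	unsigned int id;	/* ID allocated starting at 1, as in the patch */
	uint64_t gpuaddr;	/* base GPU address of the allocation */
};

/* Fake allocations standing in for a process's mem_idr / address lookup. */
static struct model_entry entries[] = {
	{ .id = 1, .gpuaddr = 0x40000000 },
	{ .id = 2, .gpuaddr = 0x40100000 },
};

static struct model_entry *find_by_id(unsigned long pgoff)
{
	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		if (entries[i].id == pgoff)
			return &entries[i];
	return NULL;
}

static struct model_entry *find_by_addr(uint64_t gpuaddr)
{
	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
		if (entries[i].gpuaddr == gpuaddr)
			return &entries[i];
	return NULL;
}

/* Mirrors the new order: ID lookup first, then the legacy by-address path. */
static struct model_entry *lookup(unsigned long pgoff)
{
	struct model_entry *e = find_by_id(pgoff);

	if (e == NULL)
		e = find_by_addr((uint64_t)pgoff << MODEL_PAGE_SHIFT);
	return e;
}

int main(void)
{
	/* pgoff 2 resolves directly through the ID table. */
	printf("pgoff 2       -> id %u\n", lookup(2)->id);
	/* pgoff 0x40100 only matches once shifted into an address. */
	printf("pgoff 0x40100 -> id %u\n", lookup(0x40100)->id);
	return 0;
}

The ID-first ordering means an ID match always wins when a small pgoff could be read either way; the by-address path is only reached for offsets that no object claims as an ID.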