drivers/gpu/ion/ion_cma_secure_heap.c (+8 −1)

@@ -356,7 +356,14 @@ static int ion_secure_cma_shrinker(struct shrinker *shrinker,
 	if (!(sc->gfp_mask & __GFP_MOVABLE))
 		return atomic_read(&sheap->total_pool_size);
 
-	mutex_lock(&sheap->chunk_lock);
+	/*
+	 * Allocation path may invoke the shrinker. Proceeding any further
+	 * would cause a deadlock in several places so don't shrink if that
+	 * happens.
+	 */
+	if (!mutex_trylock(&sheap->chunk_lock))
+		return -1;
+
 	list_for_each_safe(entry, _n, &sheap->chunks) {
 		struct ion_cma_alloc_chunk *chunk = container_of(entry,
 					struct ion_cma_alloc_chunk, entry);
drivers/gpu/ion/ion_cma_secure_heap.c (+8 −1)

@@ -356,7 +356,14 @@ static int ion_secure_cma_shrinker(struct shrinker *shrinker,
 	if (!(sc->gfp_mask & __GFP_MOVABLE))
 		return atomic_read(&sheap->total_pool_size);
 
-	mutex_lock(&sheap->chunk_lock);
+	/*
+	 * Allocation path may invoke the shrinker. Proceeding any further
+	 * would cause a deadlock in several places so don't shrink if that
+	 * happens.
+	 */
+	if (!mutex_trylock(&sheap->chunk_lock))
+		return -1;
+
 	list_for_each_safe(entry, _n, &sheap->chunks) {
 		struct ion_cma_alloc_chunk *chunk = container_of(entry,
 					struct ion_cma_alloc_chunk, entry);