arch/x86/kvm/mmu.c  +18 −7

@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
 	}
 }
 
-static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
 	int nr_to_scan = sc->nr_to_scan;
-
-	if (nr_to_scan == 0)
-		goto out;
+	unsigned long freed = 0;
 
 	raw_spin_lock(&kvm_lock);
 
@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 			goto unlock;
 		}
 
-		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
+		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+			freed++;
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 unlock:
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 
+		/*
+		 * unfair on small ones
+		 * per-vm shrinkers cry out
+		 * sadness comes quickly
+		 */
 		list_move_tail(&kvm->vm_list, &vm_list);
 		break;
 	}
 
 	raw_spin_unlock(&kvm_lock);
+	return freed;
 
-out:
+}
+
+static unsigned long
+mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
 }
 
 static struct shrinker mmu_shrinker = {
-	.shrink = mmu_shrink,
+	.count_objects = mmu_shrink_count,
+	.scan_objects = mmu_shrink_scan,
 	.seeks = DEFAULT_SEEKS * 10,
 };
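The KVM conversion shows the general shape of the new API: count_objects returns a cheap population estimate (here the percpu counter read that the old ->shrink callback used to return), while scan_objects does the actual reclaim and reports only the work it really did. A minimal sketch of that contract, assuming the callback signatures shown in the diff above; my_nr_objects(), my_evict_one() and my_shrinker are hypothetical stand-ins for a driver's own cache, not anything in this patch:

	/*
	 * Hypothetical minimal count/scan pair. count must be cheap and
	 * side-effect free; scan returns the number of objects freed.
	 */
	static unsigned long my_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
	{
		/* Cheap estimate only; never does reclaim work here. */
		return my_nr_objects();
	}
	
	static unsigned long my_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		unsigned long freed = 0;
	
		while (freed < sc->nr_to_scan) {
			if (!my_evict_one())	/* nothing left to reclaim */
				break;
			freed++;
		}
		/* Report actual progress, not a population estimate. */
		return freed;
	}
	
	static struct shrinker my_shrinker = {
		.count_objects = my_shrink_count,
		.scan_objects = my_shrink_scan,
		.seeks = DEFAULT_SEEKS,
	};

This split is why mmu_shrink_scan above can drop the old "if (nr_to_scan == 0)" fast path: a zero-work call is now handled by the core calling count_objects instead.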
drivers/gpu/drm/i915/i915_dma.c  +2 −2

@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	return 0;
 
 out_gem_unload:
-	if (dev_priv->mm.inactive_shrinker.shrink)
+	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)
 
 	i915_teardown_sysfs(dev);
 
-	if (dev_priv->mm.inactive_shrinker.shrink)
+	if (dev_priv->mm.inactive_shrinker.scan_objects)
 		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
 	mutex_lock(&dev->struct_mutex);
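Both hunks are the same one-line change: with the single ->shrink hook gone, a populated ->scan_objects is what marks the shrinker as having been registered by i915_gem_load(). A sketch of that guard as a hypothetical helper (i915_maybe_unregister_shrinker is not in the patch, just a name for the repeated test):

	/*
	 * Hypothetical wrapper: unregister only if i915_gem_load() actually
	 * registered the shrinker, using ->scan_objects as the marker, as
	 * the two hunks above do.
	 */
	static void i915_maybe_unregister_shrinker(struct drm_i915_private *dev_priv)
	{
		if (dev_priv->mm.inactive_shrinker.scan_objects)
			unregister_shrinker(&dev_priv->mm.inactive_shrinker);
	}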
drivers/gpu/drm/i915/i915_gem.c  +57 −25

@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 					 struct drm_i915_fence_reg *fence,
 					 bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
 				    struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+					    struct shrink_control *sc);
 static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,

@@ -1736,17 +1738,22 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 	return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static void
+static long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
 	struct drm_i915_gem_object *obj, *next;
+	long freed = 0;
 
 	i915_gem_evict_everything(dev_priv->dev);
 
 	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
-				 global_list)
+				 global_list) {
+		if (obj->pages_pin_count == 0)
+			freed += obj->base.size >> PAGE_SHIFT;
 		i915_gem_object_put_pages(obj);
+	}
+	return freed;
 }
 
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)

@@ -4526,7 +4533,8 @@ i915_gem_load(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+	dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+	dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
 	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }

@@ -4749,8 +4757,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker,

@@ -4758,45 +4766,35 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
-	int nr_to_scan = sc->nr_to_scan;
 	bool unlock = true;
-	int cnt;
+	unsigned long count;
 
 	if (!mutex_trylock(&dev->struct_mutex)) {
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
-			return 0;
+			return SHRINK_STOP;
 
 		if (dev_priv->mm.shrinker_no_lock_stealing)
-			return 0;
+			return SHRINK_STOP;
 
 		unlock = false;
 	}
 
-	if (nr_to_scan) {
-		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
-		if (nr_to_scan > 0)
-			nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
-							false);
-		if (nr_to_scan > 0)
-			i915_gem_shrink_all(dev_priv);
-	}
-
-	cnt = 0;
+	count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
-			cnt += obj->base.size >> PAGE_SHIFT;
+			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		if (obj->active)
 			continue;
 
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
-			cnt += obj->base.size >> PAGE_SHIFT;
+			count += obj->base.size >> PAGE_SHIFT;
 	}
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
-	return cnt;
+	return count;
 }
 
 /* All the new VM stuff */

@@ -4860,6 +4858,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 	return 0;
 }
 
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker,
+			     struct drm_i915_private,
+			     mm.inactive_shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	int nr_to_scan = sc->nr_to_scan;
+	unsigned long freed;
+	bool unlock = true;
+
+	if (!mutex_trylock(&dev->struct_mutex)) {
+		if (!mutex_is_locked_by(&dev->struct_mutex, current))
+			return 0;
+
+		if (dev_priv->mm.shrinker_no_lock_stealing)
+			return 0;
+
+		unlock = false;
+	}
+
+	freed = i915_gem_purge(dev_priv, nr_to_scan);
+	if (freed < nr_to_scan)
+		freed += __i915_gem_shrink(dev_priv, nr_to_scan, false);
+	if (freed < nr_to_scan)
+		freed += i915_gem_shrink_all(dev_priv);
+
+	if (unlock)
+		mutex_unlock(&dev->struct_mutex);
+	return freed;
+}
+
 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm)
 {
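Both new i915 callbacks repeat the same guarded entry: try struct_mutex, and if that fails, proceed only when the current task itself already holds the lock (direct reclaim recursing out of an allocation made under struct_mutex) and lock stealing is not disabled. A condensed sketch of that dance as a hypothetical helper; i915_shrinker_lock is not part of this patch, it just names the pattern the two functions above open-code:

	/*
	 * Hypothetical helper for the trylock/lock-stealing dance. Returns
	 * true if the caller may touch the GEM lists; *unlock says whether
	 * the caller owns the release.
	 */
	static bool i915_shrinker_lock(struct drm_i915_private *dev_priv,
				       bool *unlock)
	{
		struct drm_device *dev = dev_priv->dev;
	
		if (mutex_trylock(&dev->struct_mutex)) {
			*unlock = true;		/* we took it, we drop it */
			return true;
		}
	
		/* Reclaim can recurse from a path already holding the lock. */
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return false;
	
		if (dev_priv->mm.shrinker_no_lock_stealing)
			return false;
	
		*unlock = false;	/* borrowed: caller must not drop it */
		return true;
	}

When the lock cannot be obtained at all, the count path above returns SHRINK_STOP and the scan path returns 0, exactly as written in the diff.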
drivers/gpu/drm/ttm/ttm_page_alloc.c  +28 −16

@@ -377,28 +377,26 @@ out:
 	return nr_free;
 }
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
-	unsigned i;
-	int total = 0;
-	for (i = 0; i < NUM_POOLS; ++i)
-		total += _manager->pools[i].npages;
-
-	return total;
-}
-
 /**
  * Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL.  That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
  */
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
-			      struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned i;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
+	unsigned long freed = 0;
 
 	pool_offset = pool_offset % NUM_POOLS;
 	/* select start pool in round robin fashion */

@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		freed += nr_free - shrink_pages;
 	}
-	/* return estimated number of unused pages in pool */
-	return ttm_pool_get_num_unused_pages();
+	return freed;
+}
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned i;
+	unsigned long count = 0;
+
+	for (i = 0; i < NUM_POOLS; ++i)
+		count += _manager->pools[i].npages;
+
+	return count;
+}
 
 static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
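The scan path keeps ttm_page_pool_free()'s existing contract: it is asked to free up to nr_free pages and returns how much of that request is still outstanding, so what a pool actually surrendered is the request minus the remainder. A condensed sketch of that accounting together with the atomic round-robin start offset, assuming that contract; pool_scan is a hypothetical name, not in the patch:

	/*
	 * Hypothetical condensation of ttm_pool_shrink_scan() above. The
	 * static atomic counter rotates the starting pool so successive
	 * shrinker invocations do not always drain pool 0 first.
	 */
	static unsigned long pool_scan(struct shrink_control *sc)
	{
		static atomic_t start_pool = ATOMIC_INIT(0);
		unsigned pool_offset = atomic_add_return(1, &start_pool) % NUM_POOLS;
		int budget = sc->nr_to_scan;
		unsigned long freed = 0;
		unsigned i;
	
		for (i = 0; i < NUM_POOLS && budget; ++i) {
			struct ttm_page_pool *pool =
				&_manager->pools[(i + pool_offset) % NUM_POOLS];
			int asked = budget;
	
			/* returns the part of the request still outstanding */
			budget = ttm_page_pool_free(pool, asked);
			freed += asked - budget;
		}
		return freed;
	}

Returning the accumulated freed count, rather than re-estimating the pool population as the old callback did, is the core behavioral change of this hunk.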
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  +32 −19

@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
-	struct device_pools *p;
-	unsigned total = 0;
-
-	mutex_lock(&_manager->lock);
-	list_for_each_entry(p, &_manager->pools, pools)
-		total += p->pool->npages_free;
-	mutex_unlock(&_manager->lock);
-	return total;
-}
-
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {

@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 
 /**
  * Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
+ * bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers
  */
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
-				  struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	static atomic_t start_pool = ATOMIC_INIT(0);
 	unsigned idx = 0;
 	unsigned pool_offset = atomic_add_return(1, &start_pool);
 	unsigned shrink_pages = sc->nr_to_scan;
 	struct device_pools *p;
+	unsigned long freed = 0;
 
 	if (list_empty(&_manager->pools))
-		return 0;
+		return SHRINK_STOP;
 
 	mutex_lock(&_manager->lock);
 	pool_offset = pool_offset % _manager->npools;

@@ -1029,18 +1027,33 @@
 			continue;
 		nr_free = shrink_pages;
 		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		freed += nr_free - shrink_pages;
+
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
 			 p->pool->dev_name, p->pool->name, current->pid,
 			 nr_free, shrink_pages);
 	}
 	mutex_unlock(&_manager->lock);
-	/* return estimated number of unused pages in pool */
-	return ttm_dma_pool_get_num_unused_pages();
+	return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct device_pools *p;
+	unsigned long count = 0;
+
+	mutex_lock(&_manager->lock);
+	list_for_each_entry(p, &_manager->pools, pools)
+		count += p->pool->npages_free;
+	mutex_unlock(&_manager->lock);
+	return count;
+}
 
 static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
 {
-	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
 	manager->mm_shrink.seeks = 1;
 	register_shrinker(&manager->mm_shrink);
 }
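One detail worth calling out in the DMA conversion: on the no-pools path the scan callback now returns SHRINK_STOP rather than 0. Assuming the semantics this patch relies on, SHRINK_STOP tells the reclaim core that no progress is possible and it should stop calling this shrinker for the current pass, whereas 0 merely reports that nothing happened this time. A sketch of that convention; example_scan and do_reclaim are hypothetical names, not in the patch:

	/*
	 * Hypothetical scan callback illustrating the early-out convention
	 * used by ttm_dma_pool_shrink_scan() above.
	 */
	static unsigned long example_scan(struct shrinker *shrink,
					  struct shrink_control *sc)
	{
		if (list_empty(&_manager->pools))
			return SHRINK_STOP;	/* nothing to do, don't retry */
	
		return do_reclaim(sc->nr_to_scan);	/* pages actually freed */
	}

As with the non-DMA pool allocator, both callbacks must be wired up before register_shrinker() in ttm_dma_pool_mm_shrink_init(), since there is no longer a single ->shrink hook for the core to fall back on.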