
Commit 56fa4bf2 authored by Chris Wilson

drm/i915: Update shrinker drm_i915_private naming convention



Switch over from the nondescript dev_priv locals to i915.
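
As a rough illustration only (this sketch is not part of the patch; example_helper() is hypothetical, while to_i915(), struct drm_i915_gem_object and mm.obj_lock are existing i915 names), the convention reads:

	static void example_helper(struct drm_i915_gem_object *obj)
	{
		/* the device-private local is spelled "i915" rather than "dev_priv" */
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		spin_lock(&i915->mm.obj_lock);
		/* ... operate on i915->mm state, as the shrinker below does ... */
		spin_unlock(&i915->mm.obj_lock);
	}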

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171123115338.10270-2-chris@chris-wilson.co.uk


Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent 848b365d
drivers/gpu/drm/i915/i915_drv.h +4 −4
@@ -3911,7 +3911,7 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_shrinker.c */
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+unsigned long i915_gem_shrink(struct drm_i915_private *i915,
			      unsigned long target,
			      unsigned long *nr_scanned,
			      unsigned flags);
@@ -3920,9 +3920,9 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_register(struct drm_i915_private *dev_priv);
-void i915_gem_shrinker_unregister(struct drm_i915_private *dev_priv);
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
+void i915_gem_shrinker_register(struct drm_i915_private *i915);
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915);


/* i915_gem_tiling.c */
drivers/gpu/drm/i915/i915_gem_shrinker.c +67 −67
@@ -35,9 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"

-static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
{
-	switch (mutex_trylock_recursive(&dev_priv->drm.struct_mutex)) {
+	switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
	case MUTEX_TRYLOCK_RECURSIVE:
		*unlock = false;
		return true;
@@ -47,7 +47,7 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
		preempt_disable();
		do {
			cpu_relax();
-			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
+			if (mutex_trylock(&i915->drm.struct_mutex)) {
				*unlock = true;
				break;
			}
@@ -63,12 +63,12 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
	BUG();
}

-static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
+static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
	if (!unlock)
		return;

-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	mutex_unlock(&i915->drm.struct_mutex);
}

static bool swap_available(void)
@@ -118,7 +118,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)

/**
 * i915_gem_shrink - Shrink buffer object caches
- * @dev_priv: i915 device
+ * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
@@ -142,7 +142,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
 * The number of pages of backing storage actually released.
 */
unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
+i915_gem_shrink(struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned flags)
@@ -151,15 +151,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
-		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &i915->mm.bound_list, I915_SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned long count = 0;
	unsigned long scanned = 0;
	bool unlock;

-	if (!shrinker_lock(dev_priv, &unlock))
+	if (!shrinker_lock(i915, &unlock))
		return 0;

	/*
@@ -172,10 +172,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
	 * we will free as much as we can and hope to get a second chance.
	 */
	if (flags & I915_SHRINK_ACTIVE)
-		i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+		i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);

-	trace_i915_gem_shrink(dev_priv, target, flags);
-	i915_gem_retire_requests(dev_priv);
+	trace_i915_gem_shrink(i915, target, flags);
+	i915_gem_retire_requests(i915);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
@@ -183,7 +183,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
	 * we will force the wake during oom-notifier.
	 */
	if ((flags & I915_SHRINK_BOUND) &&
-	    !intel_runtime_pm_get_if_in_use(dev_priv))
+	    !intel_runtime_pm_get_if_in_use(i915))
		flags &= ~I915_SHRINK_BOUND;

	/*
@@ -221,7 +221,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
-		spin_lock(&dev_priv->mm.obj_lock);
+		spin_lock(&i915->mm.obj_lock);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
@@ -244,7 +244,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
			if (!can_release_pages(obj))
				continue;

-			spin_unlock(&dev_priv->mm.obj_lock);
+			spin_unlock(&i915->mm.obj_lock);

			if (unsafe_drop_pages(obj)) {
				/* May arrive from get_pages on another bo */
@@ -258,18 +258,18 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
			}
			scanned += obj->base.size >> PAGE_SHIFT;

-			spin_lock(&dev_priv->mm.obj_lock);
+			spin_lock(&i915->mm.obj_lock);
		}
		list_splice_tail(&still_in_list, phase->list);
-		spin_unlock(&dev_priv->mm.obj_lock);
+		spin_unlock(&i915->mm.obj_lock);
	}

	if (flags & I915_SHRINK_BOUND)
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put(i915);

-	i915_gem_retire_requests(dev_priv);
+	i915_gem_retire_requests(i915);

-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);

	if (nr_scanned)
		*nr_scanned += scanned;
@@ -278,7 +278,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
- * @dev_priv: i915 device
+ * @i915: i915 device
 *
 * This is a simple wraper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
@@ -290,16 +290,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 * Returns:
 * The number of pages of backing storage actually released.
 */
-unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	unsigned long freed;

-	intel_runtime_pm_get(dev_priv);
-	freed = i915_gem_shrink(dev_priv, -1UL, NULL,
+	intel_runtime_pm_get(i915);
+	freed = i915_gem_shrink(i915, -1UL, NULL,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_ACTIVE);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);

	return freed;
}
@@ -347,53 +347,53 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;
	bool unlock;

	sc->nr_scanned = 0;

-	if (!shrinker_lock(dev_priv, &unlock))
+	if (!shrinker_lock(i915, &unlock))
		return SHRINK_STOP;

-	freed = i915_gem_shrink(dev_priv,
+	freed = i915_gem_shrink(i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND |
				I915_SHRINK_PURGEABLE);
	if (freed < sc->nr_to_scan)
-		freed += i915_gem_shrink(dev_priv,
+		freed += i915_gem_shrink(i915,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
	if (freed < sc->nr_to_scan && current_is_kswapd()) {
-		intel_runtime_pm_get(dev_priv);
-		freed += i915_gem_shrink(dev_priv,
+		intel_runtime_pm_get(i915);
+		freed += i915_gem_shrink(i915,
					 sc->nr_to_scan - sc->nr_scanned,
					 &sc->nr_scanned,
					 I915_SHRINK_ACTIVE |
					 I915_SHRINK_BOUND |
					 I915_SHRINK_UNBOUND);
-		intel_runtime_pm_put(dev_priv);
+		intel_runtime_pm_put(i915);
	}

-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
+shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
			      int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);

	do {
-		if (i915_gem_wait_for_idle(dev_priv, 0) == 0 &&
-		    shrinker_lock(dev_priv, unlock))
+		if (i915_gem_wait_for_idle(i915, 0) == 0 &&
+		    shrinker_lock(i915, unlock))
			break;

		schedule_timeout_killable(1);
@@ -412,32 +412,32 @@ shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, bool *unlock,
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, bound, unbound, freed_pages;

-	freed_pages = i915_gem_shrink_all(dev_priv);
+	freed_pages = i915_gem_shrink_all(i915);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	unbound = bound = unevictable = 0;
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			unbound += obj->base.size >> PAGE_SHIFT;
	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			bound += obj->base.size >> PAGE_SHIFT;
	}
-	spin_unlock(&dev_priv->mm.obj_lock);
+	spin_unlock(&i915->mm.obj_lock);

	if (freed_pages || unbound || bound)
		pr_info("Purging GPU memory, %lu pages freed, "
@@ -455,39 +455,39 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	bool unlock;
	int ret;

-	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
+	if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
		return NOTIFY_DONE;

	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
+	ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (ret)
		goto out;

-	intel_runtime_pm_get(dev_priv);
-	freed_pages += i915_gem_shrink(dev_priv, -1UL, NULL,
+	intel_runtime_pm_get(i915);
+	freed_pages += i915_gem_shrink(i915, -1UL, NULL,
				       I915_SHRINK_BOUND |
				       I915_SHRINK_UNBOUND |
				       I915_SHRINK_ACTIVE |
				       I915_SHRINK_VMAPS);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);

	/* We also want to clear any cached iomaps as they wrap vmap */
	list_for_each_entry_safe(vma, next,
-				 &dev_priv->ggtt.base.inactive_list, vm_link) {
+				 &i915->ggtt.base.inactive_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;
		if (vma->iomap && i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}

out:
-	shrinker_unlock(dev_priv, unlock);
+	shrinker_unlock(i915, unlock);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
@@ -495,34 +495,34 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr

/**
 * i915_gem_shrinker_register - Register the i915 shrinker
- * @dev_priv: i915 device
+ * @i915: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
-void i915_gem_shrinker_register(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_register(struct drm_i915_private *i915)
{
-	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-	dev_priv->mm.shrinker.batch = 4096;
-	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
+	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
+	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
+	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
+	i915->mm.shrinker.batch = 4096;
+	WARN_ON(register_shrinker(&i915->mm.shrinker));

-	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-	WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
+	WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

-	dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
-	WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
+	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
+	WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
- * @dev_priv: i915 device
+ * @i915: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
-void i915_gem_shrinker_unregister(struct drm_i915_private *dev_priv)
+void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
{
-	WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier));
-	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-	unregister_shrinker(&dev_priv->mm.shrinker);
+	WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+	unregister_shrinker(&i915->mm.shrinker);
}