
Commit 5fd9cbad authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm: Memory accounting rework.

Use inclusive zones to simplify accounting and its sysfs representation.
Use DMA32 accounting where applicable.

Add a sysfs interface to make the heuristically determined limits
readable and configurable.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@linux.ie>
parent e9840be8
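
The sysfs interface added here exposes five attributes per zone (zone_memory, available_memory, emergency_memory, swap_limit and used_memory, all reported in kiB). A minimal userspace sketch for dumping them; since the directory the memory_accounting kobject lands in depends on what ttm_get_kobj() returns, the zone directory is passed as an argument rather than assumed:

#include <stdio.h>

int main(int argc, char **argv)
{
	static const char *const attrs[] = {
		"zone_memory", "available_memory", "emergency_memory",
		"swap_limit", "used_memory",
	};
	char path[512];
	unsigned long long kib;
	unsigned int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <zone sysfs dir>\n", argv[0]);
		return 1;
	}
	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); ++i) {
		snprintf(path, sizeof(path), "%s/%s", argv[1], attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;
		/* ttm_mem_zone_show() prints the value in kiB */
		if (fscanf(f, "%llu", &kib) == 1)
			printf("%-17s %llu kiB\n", attrs[i], kib);
		fclose(f);
	}
	return 0;
}

Run it as, e.g., ./ttm_zones <wherever-ttm-lives>/memory_accounting/kernel.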
+3 −3
@@ -70,7 +70,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
	if (bo->destroy)
		bo->destroy(bo);
	else {
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
		ttm_mem_global_free(bdev->mem_glob, bo->acc_size);
		kfree(bo);
	}
}
@@ -1065,14 +1065,14 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,

	size_t acc_size =
	    ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size, false);
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

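The acc_size computation above rounds the object size up to whole pages with the usual (size + PAGE_SIZE - 1) >> PAGE_SHIFT idiom; a standalone illustration of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1, 3 * PAGE_SIZE };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("%lu bytes -> %lu pages\n", sizes[i],
		       (sizes[i] + PAGE_SIZE - 1) >> PAGE_SHIFT);
	return 0;
}
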
+1 −3
@@ -71,7 +71,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)

	mutex_lock(&item->mutex);
	if (item->refcount == 0) {
		item->object = kmalloc(ref->size, GFP_KERNEL);
		item->object = kzalloc(ref->size, GFP_KERNEL);
		if (unlikely(item->object == NULL)) {
			ret = -ENOMEM;
			goto out_err;
@@ -89,7 +89,6 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
	mutex_unlock(&item->mutex);
	return 0;
out_err:
	kfree(item->object);
	mutex_unlock(&item->mutex);
	item->object = NULL;
	return ret;
@@ -105,7 +104,6 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
	BUG_ON(ref->object != item->object);
	if (--item->refcount == 0) {
		ref->release(ref);
		kfree(item->object);
		item->object = NULL;
	}
	mutex_unlock(&item->mutex);
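
The hunks above switch the global item to kzalloc() and make the release callback solely responsible for freeing the object. A compilable userspace model of that ownership rule (a sketch: locking is omitted and the names are illustrative, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct global_item {
	int refcount;
	void *object;
	size_t size;
	void (*release)(struct global_item *);
};

static void item_release(struct global_item *item)
{
	free(item->object);	/* release owns the free, as in the patch */
	item->object = NULL;
}

static int item_ref(struct global_item *item)
{
	if (item->refcount == 0) {
		item->object = calloc(1, item->size);	/* kzalloc analogue */
		if (!item->object)
			return -1;
	}
	++item->refcount;
	return 0;
}

static void item_unref(struct global_item *item)
{
	if (--item->refcount == 0)
		item->release(item);	/* no separate kfree() here anymore */
}

int main(void)
{
	struct global_item item = { 0, NULL, 64, item_release };

	if (item_ref(&item) == 0) {
		printf("object live:  %p\n", item.object);
		item_unref(&item);
		printf("after unref:  %p\n", item.object);
	}
	return 0;
}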
+412 −76
@@ -26,15 +26,180 @@
 **************************************************************************/

#include "ttm/ttm_memory.h"
#include "ttm/ttm_module.h"
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>

#define TTM_PFX "[TTM] "
#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	printk(KERN_INFO TTM_PFX
	       "Zone %7s: Used memory at exit: %llu kiB.\n",
	       zone->name, (unsigned long long) zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
@@ -42,34 +207,17 @@
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;
	uint64_t target;
	uint64_t total_target;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	if (from_workqueue) {
		target = glob->swap_limit;
		total_target = glob->total_memory_swap_limit;
	} else if (capable(CAP_SYS_ADMIN)) {
		total_target = glob->emer_total_memory;
		target = glob->emer_memory;
	} else {
		total_target = glob->max_total_memory;
		target = glob->max_memory;
	}

	total_target = (extra >= total_target) ? 0 : total_target - extra;
	target = (extra >= target) ? 0 : target - extra;

	while (glob->used_memory > target ||
	       glob->used_total_memory > total_target) {
	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
@@ -81,6 +229,8 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_workqueue,
	spin_unlock(&glob->lock);
}



static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
@@ -89,63 +239,178 @@ static void ttm_shrink_work(struct work_struct *work)
	ttm_shrink(glob, true, 0ULL);
}

int ttm_mem_global_init(struct ttm_mem_global *glob)
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct sysinfo si;
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);
	if (unlikely(!zone))
		return -ENOMEM;

	si_meminfo(&si);
	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	glob->zones[glob->num_zones++] = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
}

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;

	if (unlikely(!zone))
		return -ENOMEM;

	if (si->totalhigh == 0)
		return 0;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	glob->zones[glob->num_zones++] = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	mem = si.totalram - si.totalhigh;
	mem *= si.mem_unit;
	/**
	 * No special dma32 zone needed.
	 */

	glob->max_memory = mem >> 1;
	glob->emer_memory = (mem >> 1) + (mem >> 2);
	glob->swap_limit = glob->max_memory - (mem >> 3);
	glob->used_memory = 0;
	glob->used_total_memory = 0;
	glob->shrink = NULL;
	if (mem <= ((uint64_t) 1ULL << 32))
		return 0;

	mem = si.totalram;
	mem *= si.mem_unit;
	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	glob->max_total_memory = mem >> 1;
	glob->emer_total_memory = (mem >> 1) + (mem >> 2);
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	glob->zones[glob->num_zones++] = zone;
	kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
	return kobject_add(&zone->kobj, &glob->kobj, zone->name);
}
#endif

	glob->total_memory_swap_limit = glob->max_total_memory - (mem >> 3);
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	init_waitqueue_head(&glob->queue);
	kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
	ret = kobject_add(&glob->kobj,
			  ttm_get_kobj(),
			  "memory_accounting");
	if (unlikely(ret != 0))
		goto out_no_zone;

	printk(KERN_INFO TTM_PFX "TTM available graphics memory: %llu MiB\n",
	       glob->max_total_memory >> 20);
	printk(KERN_INFO TTM_PFX "TTM available object memory: %llu MiB\n",
	       glob->max_memory >> 20);
	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		printk(KERN_INFO TTM_PFX
		       "Zone %7s: Available graphics memory: %llu kiB.\n",
		       zone->name, (unsigned long long) zone->max_mem >> 10);
	}
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	printk(KERN_INFO TTM_PFX "Used total memory is %llu bytes.\n",
	       (unsigned long long)glob->used_total_memory);
	unsigned int i;
	struct ttm_mem_zone *zone;

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

static inline void ttm_check_swapping(struct ttm_mem_global *glob)
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping;
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	needs_swapping = (glob->used_memory > glob->swap_limit ||
			  glob->used_total_memory >
			  glob->total_memory_swap_limit);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
@@ -153,44 +418,60 @@ static inline void ttm_check_swapping(struct ttm_mem_global *glob)

}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount, bool himem)
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	glob->used_total_memory -= amount;
	if (!himem)
		glob->used_memory -= amount;
	wake_up_all(&glob->queue);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  uint64_t amount, bool himem, bool reserve)
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	uint64_t lomem_limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

	if (capable(CAP_SYS_ADMIN)) {
		limit = glob->emer_total_memory;
		lomem_limit = glob->emer_memory;
	} else {
		limit = glob->max_total_memory;
		lomem_limit = glob->max_memory;
	}
		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

	if (unlikely(glob->used_total_memory + amount > limit))
		goto out_unlock;
	if (unlikely(!himem && glob->used_memory + amount > lomem_limit))
		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		glob->used_total_memory += amount;
		if (!himem)
			glob->used_memory += amount;
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
@@ -199,12 +480,17 @@ static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
	return ret;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible, bool himem)

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob, memory, himem, true)
	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
@@ -216,6 +502,56 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */

	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{

	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}


size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
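
ttm_mem_global_reserve() above implements the inclusive-zone rule from the commit message: a normal allocation must fit within, and is charged to, every zone, while a single-zone allocation touches only that zone. A compilable toy model of those semantics (simplified: the kernel version compares current usage against the limit before charging, and the limits here are arbitrary):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct zone {
	const char *name;
	uint64_t max_mem;
	uint64_t used_mem;
};

static bool reserve(struct zone **zones, unsigned int n,
		    struct zone *single_zone, uint64_t amount)
{
	unsigned int i;

	/* Refuse if any affected zone would exceed its limit. */
	for (i = 0; i < n; ++i) {
		if (single_zone && zones[i] != single_zone)
			continue;
		if (zones[i]->used_mem + amount > zones[i]->max_mem)
			return false;
	}
	/* Charge every affected zone: the zones are inclusive. */
	for (i = 0; i < n; ++i) {
		if (single_zone && zones[i] != single_zone)
			continue;
		zones[i]->used_mem += amount;
	}
	return true;
}

int main(void)
{
	struct zone kernel = { "kernel", 1024, 0 };
	struct zone dma32 = { "dma32", 512, 0 };
	struct zone *zones[] = { &kernel, &dma32 };

	reserve(zones, 2, NULL, 256);    /* lowmem page: charged everywhere */
	reserve(zones, 2, &kernel, 256); /* page above 4 GiB: kernel only */
	printf("kernel used: %llu, dma32 used: %llu\n",
	       (unsigned long long)kernel.used_mem,
	       (unsigned long long)dma32.used_mem);
	return 0;
}
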
+11 −18
@@ -166,7 +166,7 @@ static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE, false);
		ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
@@ -187,22 +187,15 @@ static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
		if (!p)
			return NULL;

		if (PageHighMem(p)) {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, true);
		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		} else {
			ret =
			    ttm_mem_global_alloc(mem_glob, PAGE_SIZE,
						 false, false, false);
			if (unlikely(ret != 0))
				goto out_err;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	}
	return p;
out_err:
	put_page(p);
@@ -355,8 +348,8 @@ static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free(ttm->bdev->mem_glob, PAGE_SIZE,
					    PageHighMem(cur_page));
			ttm_mem_global_free_page(ttm->bdev->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
@@ -411,7 +404,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false, false);
				   false, false);
	if (unlikely(ret != 0))
		return ret;

@@ -422,7 +415,7 @@ int ttm_tt_set_user(struct ttm_tt *ttm,

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE, false);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

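The ttm_mem_global_alloc_page()/ttm_mem_global_free_page() calls above let TTM pick the accounting zone from the page itself: on !CONFIG_HIGHMEM kernels, pages beyond the 4 GiB boundary bypass the dma32 zone. A toy model of that selection, assuming 4 KiB pages (the function and parameter names are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* 4 GiB expressed in 4 KiB pages, matching the 0x00100000UL constant
 * used by ttm_mem_global_alloc_page() above. */
#define DMA32_PFN_LIMIT 0x00100000ULL

static const char *accounting_zones(uint64_t pfn, int have_dma32_zone)
{
	if (have_dma32_zone && pfn > DMA32_PFN_LIMIT)
		return "kernel zone only"; /* unreachable by DMA32 devices */
	return "all inclusive zones";
}

int main(void)
{
	printf("pfn 0x00080000: %s\n", accounting_zones(0x00080000, 1));
	printf("pfn 0x00200000: %s\n", accounting_zones(0x00200000, 1));
	return 0;
}
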
+24 −19
@@ -32,6 +32,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kobject.h>

/**
 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
@@ -60,34 +61,33 @@ struct ttm_mem_shrink {
 * @queue: Wait queue for processes suspended waiting for memory.
 * @lock: Lock to protect the @shrink - and the memory accounting members,
 * that is, essentially the whole structure with some exceptions.
 * @emer_memory: Lowmem memory limit available for root.
 * @max_memory: Lowmem memory limit available for non-root.
 * @swap_limit: Lowmem memory limit where the shrink workqueue kicks in.
 * @used_memory: Currently used lowmem memory.
 * @used_total_memory: Currently used total (lowmem + highmem) memory.
 * @total_memory_swap_limit: Total memory limit where the shrink workqueue
 * kicks in.
 * @max_total_memory: Total memory available to non-root processes.
 * @emer_total_memory: Total memory available to root processes.
 * @zones: Array of pointers to accounting zones.
 * @num_zones: Number of populated entries in the @zones array.
 * @zone_kernel: Pointer to the kernel zone.
 * @zone_highmem: Pointer to the highmem zone if there is one.
 * @zone_dma32: Pointer to the dma32 zone if there is one.
 *
 * Note that this structure is not per device. It should be global for all
 * graphics devices.
 */

#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
struct ttm_mem_global {
	struct kobject kobj;
	struct ttm_mem_shrink *shrink;
	struct workqueue_struct *swap_queue;
	struct work_struct work;
	wait_queue_head_t queue;
	spinlock_t lock;
	uint64_t emer_memory;
	uint64_t max_memory;
	uint64_t swap_limit;
	uint64_t used_memory;
	uint64_t used_total_memory;
	uint64_t total_memory_swap_limit;
	uint64_t max_total_memory;
	uint64_t emer_total_memory;
	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
	unsigned int num_zones;
	struct ttm_mem_zone *zone_kernel;
#ifdef CONFIG_HIGHMEM
	struct ttm_mem_zone *zone_highmem;
#else
	struct ttm_mem_zone *zone_dma32;
#endif
};

/**
@@ -146,8 +146,13 @@ static inline void ttm_mem_unregister_shrink(struct ttm_mem_global *glob,
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
				bool no_wait, bool interruptible, bool himem);
				bool no_wait, bool interruptible);
extern void ttm_mem_global_free(struct ttm_mem_global *glob,
				uint64_t amount, bool himem);
				uint64_t amount);
extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
				     struct page *page,
				     bool no_wait, bool interruptible);
extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
				     struct page *page);
extern size_t ttm_round_pot(size_t size);
#endif