
Commit d7a67cb1 authored by Thomas Hellstrom, committed by Dave Airlie

drm/ttm: Use private locks for the default bo range manager



Searching for a free block in the range manager may in some situations be a
lengthy operation, and we want to avoid holding the global lru lock
during that time. Instead, use a per-manager spinlock.

This leaves the global lru lock to protect only quick lru-list and swap-list
manipulation, including the list manipulation associated with reserving buffer objects.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 3205bc24
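
For context, here is a minimal sketch of the locking pattern the patch below introduces, using hypothetical names (my_manager, my_manager_create, my_manager_alloc) and a trivial bump allocator standing in for the drm_mm range search. The real change embeds a spinlock in struct ttm_range_manager so the free-block search runs under a per-manager lock instead of the global lru_lock:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical per-manager state: the allocator data is guarded by a
 * private lock rather than the device-global lru_lock. */
struct my_manager {
	spinlock_t lock;		/* protects free_start only */
	unsigned long free_start;
	unsigned long size;
};

static struct my_manager *my_manager_create(unsigned long size)
{
	struct my_manager *man = kzalloc(sizeof(*man), GFP_KERNEL);

	if (!man)
		return NULL;
	spin_lock_init(&man->lock);	/* one lock per manager instance */
	man->size = size;
	return man;
}

/* Stand-in for the range search: only the manager's own lock is held,
 * so a slow search no longer blocks lru-list or swap-list updates. */
static int my_manager_alloc(struct my_manager *man, unsigned long npages,
			    unsigned long *start)
{
	int ret = -ENOSPC;

	spin_lock(&man->lock);
	if (man->free_start + npages <= man->size) {
		*start = man->free_start;
		man->free_start += npages;
		ret = 0;
	}
	spin_unlock(&man->lock);
	return ret;
}

As the comment added by the patch notes, a spinlock keeps the common case cheap, but a mutex might be preferable if fragmented allocation patterns make the search long.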
+45 −36
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};

static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_bo_global *glob = man->bdev->glob;
	struct drm_mm *mm = man->priv;
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;
@@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
 						     mem->page_alignment,
 						     placement->fpfn,
 						     lpfn);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {