Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 04ed8459 authored by Christian König, committed by Alex Deucher
Browse files

drm/amdgpu: remove chash



Remove the chash implementation for now since it isn't used any more.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c1a8abd9
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -225,8 +225,6 @@ config DRM_AMDGPU

source "drivers/gpu/drm/amd/amdgpu/Kconfig"

source "drivers/gpu/drm/amd/lib/Kconfig"

source "drivers/gpu/drm/nouveau/Kconfig"

source "drivers/gpu/drm/i915/Kconfig"
+0 −1
Original line number Diff line number Diff line
@@ -56,7 +56,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED)	+= scheduler/
obj-$(CONFIG_DRM_TDFX)	+= tdfx/
obj-$(CONFIG_DRM_R128)	+= r128/
obj-y			+= amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
+0 −105
Original line number Diff line number Diff line
@@ -2977,22 +2977,6 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
		 adev->vm_manager.fragment_size);
}

/* Allocate and initialize a per-VM retry-fault hash table.
 *
 * Returns the new table, or NULL if the allocation failed.
 */
static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
{
	struct amdgpu_retryfault_hashtable *fault_hash;

	fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
	if (!fault_hash)
		return fault_hash;

	/* 8-byte keys (PASID+address encoding), no values: presence in
	 * the table is the only information stored.
	 */
	INIT_CHASH_TABLE(fault_hash->hash,
			AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spin_lock_init(&fault_hash->lock);
	fault_hash->count = 0;

	return fault_hash;
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
@@ -3084,12 +3068,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		vm->pasid = pasid;
	}

	vm->fault_hash = init_fault_hash();
	if (!vm->fault_hash) {
		r = -ENOMEM;
		goto error_free_root;
	}

	INIT_KFIFO(vm->faults);

	return 0;
@@ -3243,15 +3221,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
	struct amdgpu_bo *root;
	u64 fault;
	int i, r;

	amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);

	/* Clear pending page faults from IH when the VM is destroyed */
	while (kfifo_get(&vm->faults, &fault))
		amdgpu_vm_clear_fault(vm->fault_hash, fault);

	if (vm->pasid) {
		unsigned long flags;

@@ -3260,9 +3233,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
	}

	kfree(vm->fault_hash);
	vm->fault_hash = NULL;

	drm_sched_entity_destroy(&vm->entity);

	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3430,78 +3400,3 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
		}
	}
}

/**
 * amdgpu_vm_add_fault - Add a page fault record to fault hash table
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a retry page fault interrupt is
 * received. If this is a new page fault, it will be added to a hash
 * table. The return value indicates whether this is a new fault, or
 * a fault that was already known and is already being handled.
 *
 * If there are too many pending page faults, this will fail. Retry
 * interrupts should be ignored in this case until there is enough
 * free space.
 *
 * Returns 0 if the fault was added, 1 if the fault was already known,
 * -ENOSPC if there are too many pending faults.
 */
int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r = -ENOSPC;

	if (WARN_ON_ONCE(!fault_hash))
		/* Should be allocated in amdgpu_vm_init
		 */
		return r;

	/* irqsave: may run in the retry-fault interrupt path */
	spin_lock_irqsave(&fault_hash->lock, flags);

	/* Only let the hash table fill up to 50% for best performance */
	if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
		goto unlock_out;

	/* 1 if the key was already present, 0 if newly inserted */
	r = chash_table_copy_in(&fault_hash->hash, key, NULL);
	if (!r)
		fault_hash->count++;

	/* chash_table_copy_in should never fail unless we're losing count */
	WARN_ON_ONCE(r < 0);

unlock_out:
	spin_unlock_irqrestore(&fault_hash->lock, flags);
	return r;
}

/**
 * amdgpu_vm_clear_fault - Remove a page fault record
 *
 * @fault_hash: fault hash table
 * @key: 64-bit encoding of PASID and address
 *
 * This should be called when a page fault has been handled. Any
 * future interrupt with this key will be processed as a new
 * page fault.
 */
void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
{
	unsigned long flags;
	int r;

	/* Nothing to do if the table was never allocated */
	if (!fault_hash)
		return;

	spin_lock_irqsave(&fault_hash->lock, flags);

	/* Returns the removed slot index, or -EINVAL if @key not found */
	r = chash_table_remove(&fault_hash->hash, key, NULL);
	if (!WARN_ON_ONCE(r < 0)) {
		fault_hash->count--;
		WARN_ON_ONCE(fault_hash->count < 0);
	}

	spin_unlock_irqrestore(&fault_hash->lock, flags);
}
+0 −14
Original line number Diff line number Diff line
@@ -30,7 +30,6 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/chash.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -178,13 +177,6 @@ struct amdgpu_task_info {
	pid_t	tgid;
};

#define AMDGPU_PAGEFAULT_HASH_BITS 8
/* Per-VM set of pending retry page faults, keyed by a 64-bit
 * PASID+address encoding (see amdgpu_vm_add_fault). val_sz is 0:
 * presence in the table is the only information stored.
 */
struct amdgpu_retryfault_hashtable {
	DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spinlock_t	lock;	/* protects hash and count */
	int		count;	/* entries; kept <= 50% of table capacity */
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;
@@ -244,7 +236,6 @@ struct amdgpu_vm {
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether can do the bulk move */
	bool			bulk_moveable;
	struct amdgpu_retryfault_hashtable *fault_hash;
};

struct amdgpu_vm_manager {
@@ -354,11 +345,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);

void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);

void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#endif
+0 −366
Original line number Diff line number Diff line
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _LINUX_CHASH_H
#define _LINUX_CHASH_H

#include <linux/types.h>
#include <linux/hash.h>
#include <linux/bug.h>
#include <asm/bitsperlong.h>

/*
 * _CHASH_LONG_SHIFT == log2(BITS_PER_LONG); used to turn a slot index
 * into a word index in the occupancy/validity bitmaps
 * (bitmap[slot >> _CHASH_LONG_SHIFT]).
 */
#if BITS_PER_LONG == 32
# define _CHASH_LONG_SHIFT 5
#elif BITS_PER_LONG == 64
# define _CHASH_LONG_SHIFT 6
#else
# error "Unexpected BITS_PER_LONG"
#endif

/* Internal table representation shared by statically declared tables
 * (STRUCT_CHASH_TABLE) and dynamically allocated ones (struct chash_table).
 */
struct __chash_table {
	u8 bits;		/* table has 1 << bits slots */
	u8 key_size;		/* key size in bytes, 4 or 8 */
	unsigned int value_size;	/* value size in bytes, may be 0 */
	u32 size_mask;		/* (1 << bits) - 1, for wrapping slot indices */
	unsigned long *occup_bitmap, *valid_bitmap;
	union {			/* key array, viewed according to key_size */
		u32 *keys32;
		u64 *keys64;
	};
	u8 *values;		/* value array, value_size bytes per slot */

#ifdef CONFIG_CHASH_STATS
	/* lookup/miss/relocation counters, see chash_table_dump_stats */
	u64 hits, hits_steps, hits_time_ns;
	u64 miss, miss_steps, miss_time_ns;
	u64 relocs, reloc_dist;
#endif
};

/* Number of longs needed for a 2^bits-entry bitmap */
#define __CHASH_BITMAP_SIZE(bits)				\
	(((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
/* Number of longs needed for 2^bits entries of 'size' bytes each */
#define __CHASH_ARRAY_SIZE(bits, size)				\
	((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))

/* Total longs of backing storage: occupancy bitmap, validity bitmap,
 * key array and value array, in that order (see __CHASH_TABLE_INIT).
 */
#define __CHASH_DATA_SIZE(bits, key_size, value_size)	\
	(__CHASH_BITMAP_SIZE(bits) * 2 +		\
	 __CHASH_ARRAY_SIZE(bits, key_size) +		\
	 __CHASH_ARRAY_SIZE(bits, value_size))

/* Anonymous struct type embedding a table header plus its backing storage */
#define STRUCT_CHASH_TABLE(bits, key_size, value_size)			\
	struct {							\
		struct __chash_table table;				\
		unsigned long data					\
			[__CHASH_DATA_SIZE(bits, key_size, value_size)];\
	}

/**
 * struct chash_table - Dynamically allocated closed hash table
 *
 * Use this struct for dynamically allocated hash tables (using
 * chash_table_alloc and chash_table_free), where the size is
 * determined at runtime.
 */
struct chash_table {
	struct __chash_table table;
	unsigned long *data;	/* backing storage (__CHASH_DATA_SIZE layout) */
};

/**
 * DECLARE_CHASH_TABLE - macro to declare a closed hash table
 * @table: name of the declared hash table
 * @bts: Table size will be 2^bts entries
 * @key_sz: Size of hash keys in bytes, 4 or 8
 * @val_sz: Size of data values in bytes, can be 0
 *
 * This declares the hash table variable with a static size.
 *
 * The closed hash table stores key-value pairs with low memory and
 * lookup overhead. In operation it performs no dynamic memory
 * management. The data being stored does not require any
 * list_heads. The hash table performs best with small @val_sz and as
 * long as some space (about 50%) is left free in the table. But the
 * table can still work reasonably efficiently even when filled up to
 * about 90%. If bigger data items need to be stored and looked up,
 * store the pointer to it as value in the hash table.
 *
 * @val_sz may be 0. This can be useful when all the stored
 * information is contained in the key itself and the fact that it is
 * in the hash table (or not).
 */
#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz)		\
	STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table

#ifdef CONFIG_CHASH_STATS
/* Note: the expansion deliberately begins with a ',' (the comma right
 * after the parameter list below) so that it can be appended directly
 * to the initializer expression built by __CHASH_TABLE_INIT. With
 * stats disabled it expands to nothing.
 */
#define __CHASH_STATS_INIT(prefix),		\
		prefix.hits = 0,		\
		prefix.hits_steps = 0,		\
		prefix.hits_time_ns = 0,	\
		prefix.miss = 0,		\
		prefix.miss_steps = 0,		\
		prefix.miss_time_ns = 0,	\
		prefix.relocs = 0,		\
		prefix.reloc_dist = 0
#else
#define __CHASH_STATS_INIT(prefix)
#endif

/* Initialize every field of the __chash_table named by @prefix, carving
 * the two bitmaps, the key array and the value array out of the single
 * @data allocation (layout matches __CHASH_DATA_SIZE).
 */
#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz)	\
	prefix.bits = (bts),					\
		prefix.key_size = (key_sz),			\
		prefix.value_size = (val_sz),			\
		prefix.size_mask = ((1 << bts) - 1),		\
		prefix.occup_bitmap = &data[0],			\
		prefix.valid_bitmap = &data			\
			[__CHASH_BITMAP_SIZE(bts)],		\
		prefix.keys64 = (u64 *)&data			\
			[__CHASH_BITMAP_SIZE(bts) * 2],		\
		prefix.values = (u8 *)&data			\
			[__CHASH_BITMAP_SIZE(bts) * 2 +		\
			 __CHASH_ARRAY_SIZE(bts, key_sz)]	\
		__CHASH_STATS_INIT(prefix)

/**
 * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
 * @tbl: name of the declared hash table
 * @bts: Table size will be 2^bts entries
 * @key_sz: Size of hash keys in bytes, 4 or 8
 * @val_sz: Size of data values in bytes, can be 0
 *
 * Note: the macro can be used for global and local hash table variables.
 */
#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz)			\
	DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = {		\
		.table = {						\
			__CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
		},							\
		.data = {0}						\
	}

/**
 * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
 * @tbl: name of the declared hash table
 * @bts: Table size will be 2^bts entries
 * @key_sz: Size of hash keys in bytes, 4 or 8
 * @val_sz: Size of data values in bytes, can be 0
 */
#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz)			\
	__CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)

/* Allocate/free the backing storage of a struct chash_table at runtime. */
int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
		      unsigned int value_size, gfp_t gfp_mask);
void chash_table_free(struct chash_table *table);

/**
 * chash_table_dump_stats - Dump statistics of a closed hash table
 * @tbl: Pointer to the table structure
 *
 * Dumps some performance statistics of the table gathered in operation
 * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
 * user must turn on messages for chash.c (file chash.c +p).
 *
 * Compiles to nothing when CONFIG_CHASH_STATS is disabled.
 */
#ifdef CONFIG_CHASH_STATS
#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)

void __chash_table_dump_stats(struct __chash_table *table);
#else
#define chash_table_dump_stats(tbl)
#endif

/**
 * chash_table_reset_stats - Reset statistics of a closed hash table
 * @tbl: Pointer to the table structure
 */
#ifdef CONFIG_CHASH_STATS
#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)

static inline void __chash_table_reset_stats(struct __chash_table *table)
{
	/* __CHASH_STATS_INIT expands to ", field = 0, ..." (leading comma),
	 * turning this into one comma expression that zeroes every counter.
	 */
	(void)table __CHASH_STATS_INIT((*table));
}
#else
#define chash_table_reset_stats(tbl)
#endif

/**
 * chash_table_copy_in - Copy a new value into the hash table
 * @tbl: Pointer to the table structure
 * @key: Key of the entry to add or update
 * @value: Pointer to value to copy, may be NULL
 *
 * If @key already has an entry, its value is replaced. Otherwise a
 * new entry is added. If @value is NULL, the value is left unchanged
 * or uninitialized. Returns 1 if an entry already existed, 0 if a new
 * entry was added or %-ENOMEM if there was no free space in the
 * table.
 */
#define chash_table_copy_in(tbl, key, value)			\
	__chash_table_copy_in(&(*tbl).table, key, value)

int __chash_table_copy_in(struct __chash_table *table, u64 key,
			  const void *value);

/**
 * chash_table_copy_out - Copy a value out of the hash table
 * @tbl: Pointer to the table structure
 * @key: Key of the entry to find
 * @value: Pointer to value to copy, may be NULL
 *
 * If @value is not NULL and the table has a non-0 value_size, the
 * value at @key is copied to @value. Returns the slot index of the
 * entry or %-EINVAL if @key was not found.
 */
#define chash_table_copy_out(tbl, key, value)			\
	__chash_table_copy_out(&(*tbl).table, key, value, false)

/* Shared worker for copy_out/remove; 'remove' selects deletion. */
int __chash_table_copy_out(struct __chash_table *table, u64 key,
			   void *value, bool remove);

/**
 * chash_table_remove - Remove an entry from the hash table
 * @tbl: Pointer to the table structure
 * @key: Key of the entry to find
 * @value: Pointer to value to copy, may be NULL
 *
 * If @value is not NULL and the table has a non-0 value_size, the
 * value at @key is copied to @value. The entry is removed from the
 * table. Returns the slot index of the removed entry or %-EINVAL if
 * @key was not found.
 */
#define chash_table_remove(tbl, key, value)			\
	__chash_table_copy_out(&(*tbl).table, key, value, true)

/*
 * Low level iterator API used internally by the above functions.
 */
struct chash_iter {
	struct __chash_table *table;
	unsigned long mask;	/* 1UL << (slot % BITS_PER_LONG): bit within bitmap word */
	int slot;		/* current slot index */
};

/**
 * CHASH_ITER_INIT - Initialize a hash table iterator
 * @table: Pointer to hash table to iterate over
 * @s: Initial slot number
 */
#define CHASH_ITER_INIT(table, s) {			\
		table,					\
		1UL << ((s) & (BITS_PER_LONG - 1)),	\
		s					\
	}
/**
 * CHASH_ITER_SET - Set hash table iterator to new slot
 * @iter: Iterator
 * @s: Slot number
 */
#define CHASH_ITER_SET(iter, s)					\
	(iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)),	\
	(iter).slot = (s)
/**
 * CHASH_ITER_INC - Increment hash table iterator
 * @iter: Iterator to advance
 *
 * Wraps around at the end: the mask rotates within the word and the
 * slot wraps via the table's size_mask.
 */
#define CHASH_ITER_INC(iter) do {					\
		(iter).mask = (iter).mask << 1 |			\
			(iter).mask >> (BITS_PER_LONG - 1);		\
		(iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
	} while (0)

/* True if the slot holds a live entry (valid bit set). */
static inline bool chash_iter_is_valid(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
		  iter.mask);
}
/* True if the slot is unoccupied: neither a live entry nor a deleted
 * marker (chash_iter_set_invalid clears valid but leaves occup set).
 */
static inline bool chash_iter_is_empty(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
		 iter.mask);
}

/* Mark the slot as a live entry: sets both the valid and occupied bits. */
static inline void chash_iter_set_valid(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
	iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
}
/* Mark the slot's entry deleted: clears valid but leaves occupied set. */
static inline void chash_iter_set_invalid(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
}
/* Mark the slot completely unoccupied (clears the occupied bit). */
static inline void chash_iter_set_empty(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
}

/* Fetch the key at the iterator's slot; table must use 4-byte keys. */
static inline u32 chash_iter_key32(const struct chash_iter iter)
{
	BUG_ON(iter.table->key_size != 4);
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	return iter.table->keys32[iter.slot];
}
/* Fetch the key at the iterator's slot; table must use 8-byte keys. */
static inline u64 chash_iter_key64(const struct chash_iter iter)
{
	BUG_ON(iter.table->key_size != 8);
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	return iter.table->keys64[iter.slot];
}
/* Fetch the key at the iterator's slot, widened to u64; handles both
 * 4- and 8-byte key tables.
 */
static inline u64 chash_iter_key(const struct chash_iter iter)
{
	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	if (iter.table->key_size == 4)
		return iter.table->keys32[iter.slot];
	return iter.table->keys64[iter.slot];
}

/* Hash of the 4-byte key at the iterator's slot, folded to table->bits. */
static inline u32 chash_iter_hash32(const struct chash_iter iter)
{
	BUG_ON(iter.table->key_size != 4);
	return hash_32(chash_iter_key32(iter), iter.table->bits);
}

/* Hash of the 8-byte key at the iterator's slot, folded to table->bits. */
static inline u32 chash_iter_hash64(const struct chash_iter iter)
{
	BUG_ON(iter.table->key_size != 8);
	return hash_64(chash_iter_key64(iter), iter.table->bits);
}

/* Hash of the key at the iterator's slot, dispatching on the table's
 * key size (4 or 8 bytes), folded to table->bits.
 */
static inline u32 chash_iter_hash(const struct chash_iter iter)
{
	if (iter.table->key_size == 4)
		return hash_32(chash_iter_key32(iter), iter.table->bits);
	return hash_64(chash_iter_key64(iter), iter.table->bits);
}

/* Pointer to the value stored at the iterator's slot (value_size bytes). */
static inline void *chash_iter_value(const struct chash_iter iter)
{
	unsigned long offset;

	BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
	offset = (unsigned long)iter.slot * iter.table->value_size;
	return iter.table->values + offset;
}

#endif /* _LINUX_CHASH_H */
Loading