Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bc4d0f61 authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'ovs_hash'



Francesco Fusco says:

====================
ovs: introduce arch-specific fast hashing improvements

From: Daniel Borkmann <dborkman@redhat.com>

We are introducing a fast hash function (see patch1) that can be
used in the context of OpenVSwitch to reduce the hashing footprint
(patch2). For details, please see individual patches!

v1->v2:
 - Make hash generic and place it under lib
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 89e47d3b 500f8087
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
#ifndef _ASM_X86_HASH_H
#define _ASM_X86_HASH_H

/*
 * Opaque ops table (defined in linux/hash.h).  setup_arch_fast_hash()
 * overwrites its function pointers with SSE4.2 CRC32-based
 * implementations when the CPU supports them; otherwise it leaves the
 * generic fallbacks in place.
 */
struct fast_hash_ops;
extern void setup_arch_fast_hash(struct fast_hash_ops *ops);

#endif /* _ASM_X86_HASH_H */
+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@ lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o

obj-y += msr.o msr-reg.o msr-reg-export.o
obj-y += msr.o msr-reg.o msr-reg-export.o hash.o

ifeq ($(CONFIG_X86_32),y)
        obj-y += atomic64_32.o

arch/x86/lib/hash.c

0 → 100644
+88 −0
Original line number Diff line number Diff line
/*
 * Some portions derived from code covered by the following notice:
 *
 * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/hash.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/hash.h>

/*
 * Fold one 32-bit word @val into the running accumulator @crc using the
 * SSE4.2 crc32l instruction.  Note the parameter order: the accumulator
 * comes first, the data word second.  Only reached when the SSE4.2
 * feature check in setup_arch_fast_hash() passed.
 */
static inline u32 crc32_u32(u32 crc, u32 val)
{
	asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
	return crc;
}

/*
 * CRC32-based hash over a buffer of arbitrary byte length.
 *
 * @data: buffer to hash
 * @len:  length of buffer in bytes
 * @seed: start seed
 *
 * Consumes the buffer one 32-bit word at a time, then packs the
 * trailing 1-3 bytes into a single word and folds that in last.
 *
 * Fix: crc32_u32() takes (accumulator, data) in that order; the
 * original call sites passed (*p32, seed) and (tmp, seed), transposing
 * the data word and the running seed.
 */
static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
{
	const u32 *p32 = (const u32 *) data;
	u32 i, tmp = 0;

	for (i = 0; i < len / 4; i++)
		seed = crc32_u32(seed, *p32++);

	/* 3 - (len & 3) maps remaining bytes 3/2/1 to cases 0/1/2; the
	 * cases deliberately fall through to accumulate high-to-low. */
	switch (3 - (len & 0x03)) {
	case 0:
		tmp |= *((const u8 *) p32 + 2) << 16;
		/* fallthrough */
	case 1:
		tmp |= *((const u8 *) p32 + 1) << 8;
		/* fallthrough */
	case 2:
		tmp |= *((const u8 *) p32);
		seed = crc32_u32(seed, tmp);
		/* fallthrough */
	default:
		break;
	}

	return seed;
}

/*
 * CRC32-based hash over a buffer that is a whole number of 32-bit
 * words.
 *
 * @data: buffer to hash (32-bit aligned/padded)
 * @len:  number of 32-bit words, NOT bytes
 * @seed: start seed
 *
 * Fix: pass the running seed as the crc32_u32() accumulator (first
 * argument) and the data word second; the original transposed them.
 * Also drop the no-op cast — @data is already const u32 *.
 */
static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
{
	const u32 *p32 = data;
	u32 i;

	for (i = 0; i < len; i++)
		seed = crc32_u32(seed, *p32++);

	return seed;
}

/*
 * Install the SSE4.2 CRC32-based hash implementations into @ops when
 * the CPU advertises SSE4.2; otherwise leave @ops untouched so the
 * generic fallbacks remain in effect.
 */
void setup_arch_fast_hash(struct fast_hash_ops *ops)
{
	if (!cpu_has_xmm4_2)
		return;

	ops->hash  = intel_crc4_2_hash;
	ops->hash2 = intel_crc4_2_hash2;
}
+9 −0
Original line number Diff line number Diff line
#ifndef __ASM_GENERIC_HASH_H
#define __ASM_GENERIC_HASH_H

/*
 * Generic no-op fallback for architectures without an optimized hash:
 * the ops table is left untouched.
 *
 * Fix: the struct tag must be fast_hash_ops, matching the arch
 * declarations (e.g. x86's asm/hash.h declares
 * setup_arch_fast_hash(struct fast_hash_ops *)) and the definition in
 * linux/hash.h; the original used a mismatched "arch_hash_ops" tag.
 */
struct fast_hash_ops;
static inline void setup_arch_fast_hash(struct fast_hash_ops *ops)
{
}

#endif /* __ASM_GENERIC_HASH_H */
+36 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@
 */

#include <asm/types.h>
#include <asm/hash.h>
#include <linux/compiler.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -78,4 +79,39 @@ static inline u32 hash32_ptr(const void *ptr)
#endif
	return (u32)val;
}

/*
 * Dispatch table for the arch_fast_hash*() functions; architectures may
 * override the entries via setup_arch_fast_hash() (see asm/hash.h).
 */
struct fast_hash_ops {
	u32 (*hash)(const void *data, u32 len, u32 seed);
	u32 (*hash2)(const u32 *data, u32 len, u32 seed);
};

/**
 *	arch_fast_hash - Calculates a hash over a given buffer that can have
 *			 arbitrary size. This function will eventually use an
 *			 architecture-optimized hashing implementation if
 *			 available, and trades off distribution for speed.
 *
 *	@data: buffer to hash
 *	@len: length of buffer in bytes
 *	@seed: start seed
 *
 *	Returns 32bit hash.
 */
extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);

/**
 *	arch_fast_hash2 - Calculates a hash over a given buffer that has a
 *			  size that is of a multiple of 32bit words. This
 *			  function will eventually use an architecture-
 *			  optimized hashing implementation if available,
 *			  and trades off distribution for speed.
 *
 *	@data: buffer to hash (must be 32bit padded)
 *	@len: number of 32bit words
 *	@seed: start seed
 *
 *	Returns 32bit hash.
 */
extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);

#endif /* _LINUX_HASH_H */
Loading