Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d76c1ae4 authored by Ingo Molnar
Browse files

x86: clean up csum-wrappers_64.c some more



no code changed:

arch/x86/lib/csum-wrappers_64.o:
   text    data     bss     dec     hex filename
    839       0       0     839     347 csum-wrappers_64.o.before
    839       0       0     839     347 csum-wrappers_64.o.after
md5:
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.before.asm
b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.after.asm

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 0df025b7
Loading
Loading
Loading
Loading
+51 −36
Original line number Original line Diff line number Diff line
/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 * Subject to the GNU Public License v.2
 *
 *
 * Wrappers of assembly checksum functions for x86-64.
 * Wrappers of assembly checksum functions for x86-64.
 */
 */

#include <asm/checksum.h>
#include <asm/checksum.h>
#include <linux/module.h>
#include <linux/module.h>


@@ -24,19 +24,26 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
{
{
	might_sleep();
	might_sleep();
	*errp = 0;
	*errp = 0;
	if (likely(access_ok(VERIFY_READ, src, len))) {

		/* Why 6, not 7? To handle odd addresses aligned we
	if (!likely(access_ok(VERIFY_READ, src, len)))
		   would need to do considerable complications to fix the
		goto out_err;
		   checksum which is defined as an 16bit accumulator. The

		   fix alignment code is primarily for performance
	/*
		   compatibility with 32bit and that will handle odd
	 * Why 6, not 7? To handle odd addresses aligned we
		   addresses slowly too. */
	 * would need to do considerable complications to fix the
	 * checksum which is defined as an 16bit accumulator. The
	 * fix alignment code is primarily for performance
	 * compatibility with 32bit and that will handle odd
	 * addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
	if (unlikely((unsigned long)src & 6)) {
		while (((unsigned long)src & 6) && len >= 2) {
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;
			__u16 val16;

			*errp = __get_user(val16, (const __u16 __user *)src);
			*errp = __get_user(val16, (const __u16 __user *)src);
			if (*errp)
			if (*errp)
				return isum;
				return isum;

			*(__u16 *)dst = val16;
			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
					(__force unsigned)isum, val16);
@@ -47,14 +54,17 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
	}
	}
	isum = csum_partial_copy_generic((__force const void *)src,
	isum = csum_partial_copy_generic((__force const void *)src,
				dst, len, isum, errp, NULL);
				dst, len, isum, errp, NULL);
		if (likely(*errp == 0))
	if (unlikely(*errp))
		goto out_err;

	return isum;
	return isum;
	}

out_err:
	*errp = -EFAULT;
	*errp = -EFAULT;
	memset(dst, 0, len);
	memset(dst, 0, len);

	return isum;
	return isum;
}
}

EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_from_user);


/**
/**
@@ -73,6 +83,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
			  int len, __wsum isum, int *errp)
{
{
	might_sleep();
	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		*errp = -EFAULT;
		return 0;
		return 0;
@@ -81,6 +92,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
	if (unlikely((unsigned long)dst & 6)) {
	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			*errp = __put_user(val16, (__u16 __user *)dst);
@@ -93,9 +105,9 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
	}
	}


	*errp = 0;
	*errp = 0;
	return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp);
	return csum_partial_copy_generic(src, (void __force *)dst,
					 len, isum, NULL, errp);
}
}

EXPORT_SYMBOL(csum_partial_copy_to_user);
EXPORT_SYMBOL(csum_partial_copy_to_user);


/**
/**
@@ -122,14 +134,17 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,


	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;
		(__force __u64)sum;

	asm("	addq (%[saddr]),%[sum]\n"
	asm("	addq (%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq 8(%[saddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq (%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq 8(%[daddr]),%[sum]\n"
	    "	adcq $0,%[sum]\n"
	    "	adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}


	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);
EXPORT_SYMBOL(csum_ipv6_magic);