Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3532010b authored by Al Viro, committed by David S. Miller
Browse files

[NET]: Cris checksum annotations and cleanups.



* sanitize prototypes and annotate
* kill cast-as-lvalue abuses in csum_partial()
* usual ntohs-equals-shift for checksum purposes

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9be259aa
Loading
Loading
Loading
Loading
+32 −30
Original line number Diff line number Diff line
@@ -47,14 +47,16 @@

#include <asm/delay.h>

unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
__wsum csum_partial(const void *p, int len, __wsum __sum)
{
	u32 sum = (__force u32)__sum;
	const u16 *buff = p;
	/*
	* Experiments with ethernet and slip connections show that buff
	* is aligned on either a 2-byte or 4-byte boundary.
	*/
  const unsigned char *endMarker = buff + len;
  const unsigned char *marker = endMarker - (len % 16);
	const void *endMarker = p + len;
	const void *marker = endMarker - (len % 16);
#if 0
	if((int)buff & 0x3)
		printk("unaligned buff %p\n", buff);
@@ -62,24 +64,24 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
#endif
	BITON;
	while (buff < marker) {
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
    sum += *((unsigned short *)buff)++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
		sum += *buff++;
	}
	marker = endMarker - (len % 2);
  while(buff < marker) {
    sum += *((unsigned short *)buff)++;
  }
  if(endMarker - buff > 0) {
    sum += *buff;                 /* add extra byte seperately */
  }
	while (buff < marker)
		sum += *buff++;

	if (endMarker > buff)
		sum += *(const u8 *)buff;	/* add extra byte seperately */

	BITOFF;
  return(sum);
	return (__force __wsum)sum;
}

EXPORT_SYMBOL(csum_partial);
+5 −5
Original line number Diff line number Diff line
@@ -8,11 +8,11 @@
 * to split all of those into 16-bit components, then add.
 */

static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
		   unsigned short proto, unsigned int sum)
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
		   unsigned short proto, __wsum sum)
{
	int res;
	__wsum res;
	__asm__ ("add.d %2, %0\n\t"
		 "ax\n\t"
		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
		 "ax\n\t"
		 "addq 0, %0\n"
	: "=r" (res)
	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
	: "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));

	return res;
}	
+5 −5
Original line number Diff line number Diff line
@@ -9,11 +9,11 @@
 * checksum. Which means it would be necessary to split all those into
 * 16-bit components and then add.
 */
static inline unsigned int
csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
		   unsigned short len, unsigned short proto, unsigned int sum)
static inline __wsum
csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
		   unsigned short len, unsigned short proto, __wsum sum)
{
	int res;
	__wsum res;

	__asm__ __volatile__ ("add.d %2, %0\n\t"
			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
			      "addc 0, %0\n\t"
			      : "=r" (res)
			      : "0" (sum), "r" (daddr), "r" (saddr), \
			      "r" ((ntohs(len) << 16) + (proto << 8)));
			      "r" ((len + proto) << 8));

	return res;
}
+15 −19
Original line number Diff line number Diff line
@@ -17,7 +17,7 @@
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
__wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
 * better 64-bit) boundary
 */

unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
				       int len, unsigned int sum);
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				       int len, __wsum sum);

/*
 *	Fold a partial checksum into a word
 */

static inline unsigned int csum_fold(unsigned int sum)
static inline __sum16 csum_fold(__wsum csum)
{
	/* the while loop is unnecessary really, it's always enough with two
	   iterations */
	
	while(sum >> 16)
	u32 sum = (__force u32)csum;
	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
	
	return ~sum;
	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
	return (__force __sum16)~sum;
}

extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
						int len, unsigned int sum, 
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
						int len, __wsum sum,
						int *errptr);

/*
@@ -55,8 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
 *
 */

static inline unsigned short ip_fast_csum(unsigned char * iph,
					  unsigned int ihl)
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(csum_partial(iph, ihl * 4, 0));
}
@@ -66,11 +62,10 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 * returns a 16-bit checksum, already complemented
 */

static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
						   unsigned long daddr,
static inline __sum16 int csum_tcpudp_magic(__be32 saddr, __be32 daddr,
						   unsigned short len,
						   unsigned short proto,
						   unsigned int sum)
						   __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
@@ -80,7 +75,8 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
 * in icmp.c
 */

static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold (csum_partial(buff, len, 0));
}