Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 086e9dc0 authored by James Hogan
Browse files

metag: Optimised library functions



Add optimised library functions for metag.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
parent f507758c
Loading
Loading
Loading
Loading
+92 −0
Original line number Diff line number Diff line
#ifndef _METAG_CHECKSUM_H
#define _METAG_CHECKSUM_H

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern __wsum csum_partial(const void *buff, int len, __wsum sum);

/*
 * the same as csum_partial, but copies from src while it
 * checksums
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern __wsum csum_partial_copy(const void *src, void *dst, int len,
				__wsum sum);

/*
 * the same as csum_partial_copy, but copies from user space.
 *
 * here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 *
 * NOTE(review): how *csum_err is set on a faulting user access is
 * defined by the out-of-line implementation — confirm there.
 */
extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
					int len, __wsum sum, int *csum_err);

/*
 * Kernel-to-kernel copies need no user-access checking, so the plain
 * copying checksum routine is reused directly.
 */
#define csum_partial_copy_nocheck(src, dst, len, sum)	\
	csum_partial_copy((src), (dst), (len), (sum))
/*
 * Fold a partial checksum
 *
 * Collapses the 32-bit one's-complement accumulator into 16 bits
 * (two rounds of end-around carry) and returns its complement, ready
 * to be stored in a protocol header.
 */
static inline __sum16 csum_fold(__wsum csum)
{
	u32 folded = (__force u32)csum;

	/* Each round adds the high half into the low half with carry. */
	folded = (folded >> 16) + (folded & 0xffff);
	folded = (folded >> 16) + (folded & 0xffff);
	return (__force __sum16)~folded;
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 *
 * ihl is presumably the IP header length in 32-bit words (as the name
 * suggests) — the out-of-line implementation defines the exact contract.
 */
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);

/*
 * computes the 32-bit partial checksum of the TCP/UDP pseudo-header
 * (source/destination address, protocol and length), folding it into
 * "sum"; the result is NOT complemented and is suitable for feeding
 * into csum_fold() or csum_partial().
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	/*
	 * On a little-endian accumulator the pseudo-header's big-endian
	 * 16-bit length and protocol fields each contribute (value << 8),
	 * so they can be pre-combined (same trick as the x86 version).
	 */
	unsigned long len_proto = (proto + len) << 8;

	/*
	 * One's-complement sum: every add must feed its carry-out back
	 * in (ADDS sets the carry flag, ADDCS adds 1 if it was set).
	 * The original first ADD did not set flags, so the carry out of
	 * sum + daddr was silently dropped, making the checksum off by
	 * one whenever that addition wrapped; use an ADDS/ADDCS pair for
	 * all three additions.
	 */
	asm ("ADDS   %0, %0, %1\n"
	     "ADDCS  %0, %0, #1\n"
	     "ADDS   %0, %0, %2\n"
	     "ADDCS  %0, %0, #1\n"
	     "ADDS   %0, %0, %3\n"
	     "ADDCS  %0, %0, #1\n"
	     : "=d" (sum)
	     : "d" (daddr), "d" (saddr), "d" (len_proto),
	       "0" (sum)
	     : "cc");
	return sum;
}

/*
 * Compute the checksum of the TCP/UDP pseudo-header and fold it down
 * to a complemented 16-bit value ready for the packet header.
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
		  unsigned short proto, __wsum sum)
{
	__wsum partial = csum_tcpudp_nofold(saddr, daddr, len, proto, sum);

	return csum_fold(partial);
}

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 *
 * returns the folded, complemented checksum of the len bytes at buff.
 */
extern __sum16 ip_compute_csum(const void *buff, int len);

#endif /* _METAG_CHECKSUM_H */
+12 −0
Original line number Diff line number Diff line
#ifndef __ASM_DIV64_H__
#define __ASM_DIV64_H__

#include <asm-generic/div64.h>

/* Optimised 64-bit divide helpers, implemented out of line in arch code. */
extern u64 div_u64(u64 dividend, u64 divisor);
extern s64 div_s64(s64 dividend, s64 divisor);

/*
 * Defining the names to themselves is the usual marker telling generic
 * code (<linux/math64.h>) that the architecture supplies its own
 * div_u64/div_s64, so the generic inline fallbacks are not emitted.
 */
#define div_u64 div_u64
#define div_s64 div_s64

#endif
+13 −0
Original line number Diff line number Diff line
#ifndef _METAG_STRING_H_
#define _METAG_STRING_H_

/*
 * Optimised memory primitives for metag.  Each __HAVE_ARCH_* define
 * tells <linux/string.h> not to use the generic C implementation; the
 * optimised versions are provided by the arch library code.
 */

#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);

#define __HAVE_ARCH_MEMCPY
void *memcpy(void *__to, __const__ void *__from, size_t __n);

#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);

#endif /* _METAG_STRING_H_ */
+33 −0
Original line number Diff line number Diff line
! Copyright (C) 2012 by Imagination Technologies Ltd.
!
! 64-bit arithmetic shift left routine.
!
! libgcc-style helper: shifts the 64-bit value passed in D1Ar1 (high
! word) : D0Ar2 (low word) left by the count in D1Ar3, returning the
! result in D1Re0 (high) : D0Re0 (low).  As with libgcc's __ashldi3,
! counts >= 64 are not expected to give meaningful results.
!

	.text
	.global ___ashldi3
	.type   ___ashldi3,function

___ashldi3:
	MOV     D0Re0,D0Ar2             ! LO = low input word
	MOV     D1Re0,D1Ar1             ! HI = high input word
	CMP     D1Ar3,#0                ! COUNT == 0
	MOVEQ   PC,D1RtP                ! Yes, return

	SUBS    D0Ar4,D1Ar3,#32         ! N = COUNT - 32
	BGE     $L10

!! Shift < 32
	NEG     D0Ar4,D0Ar4             ! N = - N   (i.e. 32 - COUNT)
	LSL     D1Re0,D1Re0,D1Ar3       ! HI = HI << COUNT
	LSR     D0Ar6,D0Re0,D0Ar4       ! TMP= LO >> -(COUNT - 32)
	OR      D1Re0,D1Re0,D0Ar6       ! HI = HI | TMP  (bits moved up from LO)
	SWAP    D0Ar4,D1Ar3             ! exchange so COUNT is in the D0 unit
	LSL     D0Re0,D0Re0,D0Ar4       ! LO = LO << COUNT
	MOV     PC,D1RtP

$L10:
!! Shift >= 32
	LSL     D1Re0,D0Re0,D0Ar4       ! HI = LO << N
	MOV     D0Re0,#0                ! LO = 0
	MOV     PC,D1RtP
	.size ___ashldi3,.-___ashldi3
+33 −0
Original line number Diff line number Diff line
! Copyright (C) 2012 by Imagination Technologies Ltd.
!
! 64-bit arithmetic shift right routine.
!
! libgcc-style helper: shifts the 64-bit value passed in D1Ar1 (high
! word) : D0Ar2 (low word) right arithmetically (sign-extending) by the
! count in D1Ar3, returning the result in D1Re0 (high) : D0Re0 (low).
! Counts >= 64 are not expected to give meaningful results.
!

	.text
	.global ___ashrdi3
	.type   ___ashrdi3,function

___ashrdi3:
	MOV     D0Re0,D0Ar2             ! LO = low input word
	MOV     D1Re0,D1Ar1             ! HI = high input word
	CMP     D1Ar3,#0                ! COUNT == 0
	MOVEQ   PC,D1RtP                ! Yes, return

	MOV     D0Ar4,D1Ar3             ! copy COUNT into the D0 unit
	SUBS    D1Ar3,D1Ar3,#32         ! N = COUNT - 32
	BGE     $L20

!! Shift < 32
	NEG     D1Ar3,D1Ar3             ! N = - N   (i.e. 32 - COUNT)
	LSR     D0Re0,D0Re0,D0Ar4       ! LO = LO >> COUNT
	LSL     D0Ar6,D1Re0,D1Ar3       ! TMP= HI << -(COUNT - 32)
	OR      D0Re0,D0Re0,D0Ar6       ! LO = LO | TMP  (bits moved down from HI)
	SWAP    D1Ar3,D0Ar4             ! exchange so COUNT is back in the D1 unit
	ASR     D1Re0,D1Re0,D1Ar3       ! HI = HI >> COUNT  (arithmetic)
	MOV     PC,D1RtP
$L20:
!! Shift >= 32
	ASR     D0Re0,D1Re0,D1Ar3       ! LO = HI >> N  (arithmetic)
	ASR     D1Re0,D1Re0,#31         ! HI = HI >> 31 (replicate sign bit)
	MOV     PC,D1RtP
	.size ___ashrdi3,.-___ashrdi3
Loading