Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cf97ebd authored by Max Filippov
Browse files

xtensa: clean up functions in assembly code



Use ENTRY and ENDPROC throughout arch/xtensa/lib assembly sources.
Introduce asm/linkage.h and define xtensa-specific __ALIGN macro there.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
parent fbb871e2
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Xtensa-specific overrides for the generic <linux/linkage.h> macros.
 *
 * Defining __ALIGN here makes the generic ENTRY()/ENDPROC() macros
 * align function entry points to a 4-byte boundary on xtensa.
 * __ALIGN is the assembler directive form (for .S sources);
 * __ALIGN_STR is the same directive as a C string literal
 * (for use inside inline-asm strings).
 */
#ifndef __ASM_LINKAGE_H
#define __ASM_LINKAGE_H

#define __ALIGN		.align 4
#define __ALIGN_STR	".align 4"

#endif
+10 −20
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
 * Copyright (C) 2002 - 2012 Tensilica Inc.
 */

#include <linux/linkage.h>
#include <variant/core.h>
#include <asm/asmmacro.h>

@@ -108,10 +109,7 @@
	addi	a5, a5,  2
	j	.Ldstaligned	# dst is now aligned, return to main algorithm

	.align	4
	.global	memcpy
	.type   memcpy,@function
memcpy:
ENTRY(memcpy)

	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
@@ -273,14 +271,14 @@ memcpy:
	s8i	a6, a5,  0
	retw

ENDPROC(memcpy)

/*
 * void bcopy(const void *src, void *dest, size_t n);
 */
	.align	4
	.global	bcopy
	.type   bcopy,@function
bcopy:

ENTRY(bcopy)

	entry	sp, 16		# minimal stack frame
	# a2=src, a3=dst, a4=len
	mov	a5, a3
@@ -288,6 +286,8 @@ bcopy:
	mov	a2, a5
	j	.Lmovecommon	# go to common code for memmove+bcopy

ENDPROC(bcopy)

/*
 * void *memmove(void *dst, const void *src, size_t len);
 *
@@ -376,10 +376,7 @@ bcopy:
	j	.Lbackdstaligned	# dst is now aligned,
					# return to main algorithm

	.align	4
	.global	memmove
	.type   memmove,@function
memmove:
ENTRY(memmove)

	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
@@ -551,11 +548,4 @@ memmove:
	s8i	a6, a5,  0
	retw


/*
 * Local Variables:
 * mode:fundamental
 * comment-start: "# "
 * comment-start-skip: "# *"
 * End:
 */
ENDPROC(memmove)
+4 −4
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <linux/linkage.h>
#include <variant/core.h>
#include <asm/asmmacro.h>

@@ -30,10 +31,8 @@
 */

.text
.align	4
.global	memset
.type	memset,@function
memset:
ENTRY(memset)

	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ c, a4/ length
	extui	a3, a3, 0, 8	# mask to just 8 bits
@@ -141,6 +140,7 @@ EX(10f) s8i a3, a5, 0
.Lbytesetdone:
	retw

ENDPROC(memset)

	.section .fixup, "ax"
	.align	4
+4 −4
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <variant/core.h>
#include <asm/asmmacro.h>

@@ -47,10 +48,8 @@
#   a12/ tmp

.text
.align	4
.global	__strncpy_user
.type	__strncpy_user,@function
__strncpy_user:
ENTRY(__strncpy_user)

	entry	sp, 16		# minimal stack frame
	# a2/ dst, a3/ src, a4/ len
	mov	a11, a2		# leave dst in return value register
@@ -202,6 +201,7 @@ EX(10f) s8i a9, a11, 0
	sub	a2, a11, a2		# compute strlen
	retw

ENDPROC(__strncpy_user)

	.section .fixup, "ax"
	.align	4
+5 −4
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
 *  Copyright (C) 2002 Tensilica Inc.
 */

#include <linux/linkage.h>
#include <variant/core.h>
#include <asm/asmmacro.h>

@@ -42,10 +43,8 @@
#   a10/ tmp

.text
.align	4
.global	__strnlen_user
.type	__strnlen_user,@function
__strnlen_user:
ENTRY(__strnlen_user)

	entry	sp, 16		# minimal stack frame
	# a2/ s, a3/ len
	addi	a4, a2, -4	# because we overincrement at the end;
@@ -133,6 +132,8 @@ EX(10f) l32i a9, a4, 0 # get word with first two bytes of string
	sub	a2, a4, a2	# subtract to get length
	retw

ENDPROC(__strnlen_user)

	.section .fixup, "ax"
	.align	4
10:
Loading