Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 93e2e851 authored by Michal Simek
Browse files

microblaze: Separate library optimized functions



memcpy/memmove/memset

Signed-off-by: Michal Simek <monstr@monstr.eu>
parent ccea0e6e
Loading
Loading
Loading
Loading
+10 −3
Original line number Diff line number Diff line
@@ -33,17 +33,24 @@
#include <asm/system.h>

#ifdef __HAVE_ARCH_MEMCPY
#ifndef CONFIG_OPT_LIB_FUNCTION
void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;
#ifndef CONFIG_OPT_LIB_FUNCTION

	/* Simple, byte oriented memcpy. */
	while (c--)
		*dst++ = *src++;

	return v_dst;
#else
}
#else /* CONFIG_OPT_LIB_FUNCTION */
void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;

	/* The following code tries to optimize the copy by using unsigned
	 * alignment. This will work fine if both source and destination are
	 * aligned on the same boundary. However, if they are aligned on
@@ -150,7 +157,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
	}

	return v_dst;
#endif
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memcpy);
#endif /* __HAVE_ARCH_MEMCPY */
+18 −8
Original line number Diff line number Diff line
@@ -31,16 +31,12 @@
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMMOVE
#ifndef CONFIG_OPT_LIB_FUNCTION
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;

#ifdef CONFIG_OPT_LIB_FUNCTION
	const uint32_t *i_src;
	uint32_t *i_dst;
#endif

	if (!c)
		return v_dst;

@@ -48,7 +44,6 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

#ifndef CONFIG_OPT_LIB_FUNCTION
	/* copy backwards, from end to beginning */
	src += c;
	dst += c;
@@ -58,7 +53,22 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
		*--dst = *--src;

	return v_dst;
#else
}
#else /* CONFIG_OPT_LIB_FUNCTION */
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;
	const uint32_t *i_src;
	uint32_t *i_dst;

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

	/* The following code tries to optimize the copy by using unsigned
	 * alignment. This will work fine if both source and destination are
	 * aligned on the same boundary. However, if they are aligned on
@@ -169,7 +179,7 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
		*--dst = *--src;
	}
	return v_dst;
#endif
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */
+18 −4
Original line number Diff line number Diff line
@@ -31,17 +31,30 @@
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMSET
#ifndef CONFIG_OPT_LIB_FUNCTION
/*
 * Fill the first n bytes of v_src with the byte value of c.
 * Returns v_src, per the standard memset contract.
 */
void *memset(void *v_src, int c, __kernel_size_t n)
{
	char *out = v_src;

	/* Only the low 8 bits of 'c' are used as the fill byte. */
	c &= 0xFF;

	/* Straightforward byte-wise fill; no word-size optimization here. */
	for (; n; n--)
		*out++ = c;

	return v_src;
}
#else /* CONFIG_OPT_LIB_FUNCTION */
void *memset(void *v_src, int c, __kernel_size_t n)
{
	char *src = v_src;
#ifdef CONFIG_OPT_LIB_FUNCTION
	uint32_t *i_src;
	uint32_t w32 = 0;
#endif

	/* Truncate c to 8 bits */
	c = (c & 0xFF);

#ifdef CONFIG_OPT_LIB_FUNCTION
	if (unlikely(c)) {
		/* Make a repeating word out of it */
		w32 = c;
@@ -72,12 +85,13 @@ void *memset(void *v_src, int c, __kernel_size_t n)

		src  = (void *)i_src;
	}
#endif

	/* Simple, byte oriented memset or the rest of count. */
	while (n--)
		*src++ = c;

	return v_src;
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memset);
#endif /* __HAVE_ARCH_MEMSET */