Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e96636cc, authored by Yoshinori Sato and committed by Paul Mundt
Browse files

sh: Various nommu fixes.



This fixes up some of the various outstanding nommu bugs on
SH.

Signed-off-by: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent e7f93a35
Loading
Loading
Loading
Loading
+8 −2
Original line number Diff line number Diff line
@@ -21,11 +21,17 @@ endif
# Default link/load parameters; each may be overridden from the environment.
CONFIG_PAGE_OFFSET	?= 0x80000000
CONFIG_MEMORY_START     ?= 0x0c000000
CONFIG_BOOT_LINK_OFFSET ?= 0x00800000

# Link address of the compressed image:
#   page offset + RAM start + boot link offset.
# (The stale single-line duplicate of this assignment has been removed;
# with `:=` the later assignment silently won anyway.)
IMAGE_OFFSET	:= $(shell printf "0x%8x" $$[$(CONFIG_PAGE_OFFSET)  + \
					     $(CONFIG_MEMORY_START) + \
					     $(CONFIG_BOOT_LINK_OFFSET)])

# libgcc supplies compiler helper routines the decompressor may reference.
LIBGCC	:= $(shell $(CC) $(CFLAGS) -print-libgcc-file-name)

# Link the decompressor at IMAGE_OFFSET with its own entry point and script.
LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -e startup -T $(obj)/../../kernel/vmlinux.lds

# $(LIBGCC) must be a prerequisite so compiler helper routines are linked in
# (the stale duplicate rule line without it has been removed).
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
	$(call if_changed,ld)
	@:

+5 −0
Original line number Diff line number Diff line
@@ -248,8 +248,13 @@ void __init mem_init(void)
	 * Setup wrappers for copy/clear_page(), these will get overridden
	 * later in the boot process if a better method is available.
	 */
#ifdef CONFIG_MMU
	copy_page = copy_page_slow;
	clear_page = clear_page_slow;
#else
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;
#endif

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem_node(NODE_DATA(0));
+9 −8
Original line number Diff line number Diff line
@@ -14,23 +14,24 @@
#include <linux/string.h>
#include <asm/page.h>

static void copy_page_nommu(void *to, void *from)
void copy_page_nommu(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}

static void clear_page_nommu(void *to)
void clear_page_nommu(void *to)
{
	memset(to, 0, PAGE_SIZE);
}

static int __init pg_nommu_init(void)
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n)
{
	copy_page = copy_page_nommu;
	clear_page = clear_page_nommu;

	memcpy(to, from, n);
	return 0;
}

/*
 * __clear_user - zero a "user space" buffer (MMU-less fallback).
 * @to: destination buffer
 * @n:  number of bytes to zero
 *
 * No MMU means no user/kernel address split, so memset() cannot fault;
 * always return 0 (nothing left unwritten).  The stale
 * subsys_initcall(pg_nommu_init) reference is dropped along with the
 * deleted pg_nommu_init() initcall.
 */
__kernel_size_t __clear_user(void *to, __kernel_size_t n)
{
	memset(to, 0, n);
	return 0;
}
+8 −0
Original line number Diff line number Diff line
@@ -14,11 +14,19 @@
#include <asm/cpu/addrspace.h>

/* Memory segments (32bit Privileged mode addresses)  */
#ifdef CONFIG_MMU
/* MMU case: classic SH privileged segment layout.
 * NOTE(review): the cached/uncached semantics of P1/P2 are SH convention,
 * inferred rather than shown here -- confirm against the CPU manual. */
#define P0SEG		0x00000000
#define P1SEG		0x80000000
#define P2SEG		0xa0000000
#define P3SEG		0xc0000000
#define P4SEG		0xe0000000
#else
/* MMU-less case: no address translation, so the segment bases collapse
 * into the flat physical map (P0 and P1 share base 0). */
#define P0SEG		0x00000000
#define P1SEG		0x00000000
#define P2SEG		0x20000000
#define P3SEG		0x40000000
#define P4SEG 		0x80000000
#endif

/* Returns the privileged segment base of a given address  */
/* NOTE(review): masks the top three address bits; on nommu this yields the
 * 0x20000000-granular segment base rather than a translated address. */
#define PXSEG(a)	(((unsigned long)(a)) & 0xe0000000)
+1 −1
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@
#define __ASM_SH_FLAT_H

/* bFLT (flat binary) loader hooks for SH.  The duplicate, conflicting
 * definition of flat_argvp_envp_on_stack() (stale value 1) is removed:
 * redefining a macro with a different value is a hard compile error, and
 * this change sets it to 0 so argv/envp pointers are NOT placed on the
 * stack. */
#define	flat_stack_align(sp)			/* nothing needed */
#define	flat_argvp_envp_on_stack()		0
#define	flat_old_ram_flag(flags)		(flags)
#define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
#define	flat_get_addr_from_rp(rp, relval, flags)	get_unaligned(rp)
Loading