Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29d35325 authored by Sarangdhar Joshi
Browse files

arm64: kernel: Update PERCPU_SECTION and RW_DATA_SECTION alignment



Update PERCPU_SECTION and RW_DATA_SECTION to align to L1_CACHE_BYTES
instead of the hard-coded 64 bytes, since L1_CACHE_BYTES may be
updated to a cache line size larger than 64 bytes.

Change-Id: I0bc1f0420675cf65e958c53a230357918f05aabc
Signed-off-by: Sarangdhar Joshi <spjoshi@codeaurora.org>
parent 2f1c2747
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -9,6 +9,7 @@
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

#include "image.h"

@@ -113,7 +114,7 @@ SECTIONS
		ARM_EXIT_KEEP(EXIT_DATA)
	}

	PERCPU_SECTION(64)
	PERCPU_SECTION(L1_CACHE_BYTES)

	. = ALIGN(PAGE_SIZE);
	__init_end = .;
@@ -131,7 +132,7 @@ SECTIONS
	. = ALIGN(PAGE_SIZE);
	_data = .;
	_sdata = .;
	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	_edata = .;

	BSS_SECTION(0, 0, 0)