
Commit 2cb82400 authored by Chris Metcalf

arch/tile: catch up with section naming convention in 2.6.35



The convention changed to, e.g., ".data..page_aligned".  This commit
fixes the places in the tile architecture that were still using the
old convention.  One tile-specific section (.init.page) was dropped
in favor of just using an "aligned" attribute.

Sam Ravnborg <sam@ravnborg.org> pointed out __PAGE_ALIGNED_BSS, etc.
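
For context: the double dot keeps kernel-special sections from colliding with compiler-generated ones (gcc's -fdata-sections emits a .data.<symbol> section per object). The generic helpers Sam points out live in <linux/linkage.h>; in a 2.6.35-era tree they read approximately as follows (a sketch from memory, not part of this diff):

    /* C-level markers: place the object in the page-aligned
     * data/bss section and force PAGE_SIZE alignment. */
    #define __page_aligned_data \
            __attribute__((__section__(".data..page_aligned"))) \
            __aligned(PAGE_SIZE)
    #define __page_aligned_bss \
            __attribute__((__section__(".bss..page_aligned"))) \
            __aligned(PAGE_SIZE)

    /* Assembly-side equivalents for use in .S files. */
    #define __PAGE_ALIGNED_DATA     .section ".data..page_aligned", "aw"
    #define __PAGE_ALIGNED_BSS      .section ".bss..page_aligned", "aw"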

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent d356b595
+1 −1
@@ -40,7 +40,7 @@
#define INTERNODE_CACHE_BYTES   L2_CACHE_BYTES

/* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))

/*
 * Attribute for data that is kept read/write coherent until the end of
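
Only the section name changes here; users of __read_mostly are untouched. A minimal usage sketch (the variable is hypothetical):

    /* Read often, written rarely: grouping such variables in
     * .data..read_mostly keeps them off write-hot cache lines. */
    static int debug_level __read_mostly;   /* hypothetical example */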
+2 −2
@@ -133,7 +133,7 @@ ENTRY(_start)
	}
	ENDPROC(_start)

.section ".bss.page_aligned","w"
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
@@ -148,7 +148,7 @@ ENTRY(empty_zero_page)
	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
	.endm

.section ".data.page_aligned","wa"
__PAGE_ALIGNED_DATA
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	/*
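
After preprocessing, the two macros emit plain .section directives with the new names (and uniform "aw" flags); assuming the <linux/linkage.h> sketch above, the assembler sees roughly:

    .section ".bss..page_aligned", "aw"     /* from __PAGE_ALIGNED_BSS  */
    .section ".data..page_aligned", "aw"    /* from __PAGE_ALIGNED_DATA */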
+1 −4
@@ -59,10 +59,7 @@ SECTIONS

  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_sinitdata) = .;
-  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-    *(.init.page)
-  } :data =0
-  INIT_DATA_SECTION(16)
+  INIT_DATA_SECTION(16) :data =0
  PERCPU(PAGE_SIZE)
  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_einitdata) = .;
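
The dropped tile-specific .init.page output section is folded into the generic init-data section; the trailing ":data =0" keeps the same :data program header and zero fill the removed block had. For reference, INIT_DATA_SECTION(16) comes from include/asm-generic/vmlinux.lds.h and expands to approximately:

    #define INIT_DATA_SECTION(initsetup_align)                    \
            .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {     \
                    INIT_DATA                                     \
                    INIT_SETUP(initsetup_align)                   \
                    INIT_CALLS                                    \
                    CON_INITCALL                                  \
                    SECURITY_INITCALL                             \
                    INIT_RAM_FS                                   \
            }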
+1 −2
@@ -46,8 +46,7 @@ struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
-  __attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
+int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
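
The generic __page_aligned_bss helper replaces the open-coded attribute pair; assuming the definition sketched under the commit message, the new declaration is equivalent to:

    int atomic_locks[PAGE_SIZE / sizeof(int)]
            __attribute__((__section__(".bss..page_aligned"),
                           aligned(PAGE_SIZE)));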

+1 −1
@@ -445,7 +445,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
- __attribute__((section(".init.page")));
+ __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
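
With .init.page gone, the staging page table needs only its hypervisor-mandated alignment, which the plain attribute provides wherever the zero-initialized static ends up (normally .bss). A minimal illustration of the guarantee (names and size are hypothetical):

    #define EXAMPLE_ALIGN 0x1000    /* stand-in for HV_PAGE_TABLE_ALIGN */
    static char table[4096] __attribute__((aligned(EXAMPLE_ALIGN)));
    /* (uintptr_t)table % EXAMPLE_ALIGN == 0 is guaranteed. */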