
Commit a6eb9fe1 authored by FUJITA Tomonori, committed by Linus Torvalds

dma-mapping: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN



Now each architecture has its own dma_get_cache_alignment implementation.

dma_get_cache_alignment returns the minimum DMA alignment.  Architectures
define it as ARCH_KMALLOC_MINALIGN (which is used to make sure that a
kmalloc'ed buffer is DMA-safe; the buffer doesn't share a cache line with
other allocations).  So we can unify the dma_get_cache_alignment
implementations.

This patch:

dma_get_cache_alignment() needs to know whether an architecture defines
ARCH_KMALLOC_MINALIGN or not (that is, whether the architecture has a DMA
alignment restriction).  However, slab.h defines ARCH_KMALLOC_MINALIGN
itself when an architecture doesn't, so the macro is always defined and
cannot be used for that test.
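
A minimal sketch of the problem (the fallback value shown for slab.h is
illustrative, not a quote of the header):

/* slab.h supplies a fallback, so ARCH_KMALLOC_MINALIGN is always defined
 * by the time generic code sees it. */
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/* Consequently, a unified implementation written like this cannot work:
 * the #ifdef is true on every architecture, whether or not it actually
 * has a DMA alignment restriction. */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_KMALLOC_MINALIGN
	return ARCH_KMALLOC_MINALIGN;
#else
	return 1;
#endif
}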

Let's rename the architecture-defined macro from ARCH_KMALLOC_MINALIGN to
ARCH_DMA_MINALIGN.  ARCH_KMALLOC_MINALIGN then remains in use only in the
internals of slab/slob/slub (except for crypto).
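
A minimal sketch of how the rename enables the unification (the fallback
value and exact placement are assumptions for illustration, not quotes of
the final code):

/* slab.h can keep its fallback, now derived from the architecture's DMA
 * alignment when one exists: */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/* ... while dma-mapping code can test the architecture-provided macro
 * directly and share one dma_get_cache_alignment() implementation: */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}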

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cd1542c8
+1 −1
@@ -14,7 +14,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
+1 −1
@@ -11,7 +11,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #ifndef __ASSEMBLER__
 struct cache_info {
+1 −1
@@ -15,7 +15,7 @@
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #ifdef CONFIG_SMP
 #define __cacheline_aligned
+1 −1
@@ -35,7 +35,7 @@
  * the slab must be aligned such that load- and store-double instructions don't
  * fault if used
  */
-#define	ARCH_KMALLOC_MINALIGN		L1_CACHE_BYTES
+#define	ARCH_DMA_MINALIGN		L1_CACHE_BYTES
 #define	ARCH_SLAB_MINALIGN		L1_CACHE_BYTES
 
 /*****************************************************************************/
+1 −1
@@ -8,6 +8,6 @@
 #define        L1_CACHE_SHIFT  4
 #define        L1_CACHE_BYTES  (1<< L1_CACHE_SHIFT)
 
-#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
 #endif