
Commit 5e747772 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server

Merge "Merge android-4.9.149 (ed0b11d2) into msm-4.9"

parents 4655aa54 b62aeea0
Documentation/ABI/testing/procfs-smaps_rollup  +31 −0
What:		/proc/pid/smaps_rollup
Date:		August 2017
Contact:	Daniel Colascione <dancol@google.com>
Description:
		This file provides pre-summed memory information for a
		process.  The format is identical to /proc/pid/smaps,
		except instead of an entry for each VMA in a process,
		smaps_rollup has a single entry (tagged "[rollup]")
		for which each field is the sum of the corresponding
		fields from all the maps in /proc/pid/smaps.
		For more details, see the procfs man page.

		Typical output looks like this:

		00100000-ff709000 ---p 00000000 00:00 0		 [rollup]
		Rss:		     884 kB
		Pss:		     385 kB
		Shared_Clean:	     696 kB
		Shared_Dirty:	       0 kB
		Private_Clean:	     120 kB
		Private_Dirty:	      68 kB
		Referenced:	     884 kB
		Anonymous:	      68 kB
		LazyFree:	       0 kB
		AnonHugePages:	       0 kB
		ShmemPmdMapped:	       0 kB
		Shared_Hugetlb:	       0 kB
		Private_Hugetlb:       0 kB
		Swap:		       0 kB
		SwapPss:	       0 kB
		Locked:		     385 kB
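
		A minimal sketch of consuming this file from userspace,
		assuming only what the description above states (the
		/proc/<pid>/smaps_rollup path and its plain-text format);
		error handling is deliberately terse:

		#include <stdio.h>

		int main(void)
		{
			char line[256];
			FILE *f = fopen("/proc/self/smaps_rollup", "r");

			if (!f) {
				perror("smaps_rollup"); /* kernel may predate this file */
				return 1;
			}
			/* The single "[rollup]" entry is short; just echo it. */
			while (fgets(line, sizeof(line), f))
				fputs(line, stdout);
			fclose(f);
			return 0;
		}
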
Documentation/device-mapper/dm-crypt.txt  +14 −0
@@ -76,6 +76,20 @@ submit_from_crypt_cpus
    thread because it benefits CFQ to have writes submitted using the
    same context.

sector_size:<bytes>
    Use <bytes> as the encryption unit instead of 512-byte sectors.
    This option can be in the range 512 - 4096 bytes and must be a power of two.
    The virtual device will announce this size as its minimum IO and logical sector size.

iv_large_sectors
   IV generators will use a sector number counted in <sector_size> units
   instead of the default 512-byte sectors.

   For example, if <sector_size> is 4096 bytes, the plain64 IV for the
   second sector will be 8 without the flag and 1 if iv_large_sectors is
   present.  The <iv_offset> must be a multiple of <sector_size>
   (in 512-byte units) if this flag is specified.
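
To make the example arithmetic concrete, a small worked sketch (the
values mirror the text above: 4096-byte encryption sectors and the
plain64 IV generator; this is plain arithmetic, not dm-crypt API):

#include <stdio.h>

int main(void)
{
	/* Byte offset at which the second <sector_size> sector starts. */
	unsigned long long off = 4096;

	/* Without iv_large_sectors, the IV counts 512-byte sectors. */
	printf("plain64 IV, no flag:   %llu\n", off / 512);	/* 8 */

	/* With iv_large_sectors, the IV counts <sector_size> sectors. */
	printf("plain64 IV, with flag: %llu\n", off / 4096);	/* 1 */
	return 0;
}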

Example scripts
===============
LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
+641 −0

File added; preview size limit exceeded, changes collapsed.

Makefile  +4 −2
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 145
+SUBLEVEL = 149
EXTRAVERSION =
NAME = Roaring Lionus

@@ -521,7 +521,9 @@ CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_TARGET)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
-GCC_TOOLCHAIN	:= $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX	:= --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN	:= $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
arch/arc/include/asm/io.h  +72 −0
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
	return w;
}

/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address in
 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
 * @count times
 */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,	\
				  void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			u##t x = __raw_read##f(addr);			\
			*buf++ = x;					\
		} while (--count);					\
	} else {							\
		do {							\
			u##t x = __raw_read##f(addr);			\
			put_unaligned(x, buf++);			\
		} while (--count);					\
	}								\
}

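/*
 * Instantiate the 8-, 16- and 32-bit readers.  The self-referencing
 * #define before each instantiation is the usual kernel idiom for
 * marking the helper as provided by this architecture, so generic
 * fallbacks are not pulled in.
 */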
#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)

#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)

}

#define __raw_writesx(t,f)						\
static inline void __raw_writes##f(volatile void __iomem *addr, 	\
				   const void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	const u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPUs don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			__raw_write##f(*buf++, addr);			\
		} while (--count);					\
	} else {							\
		do {							\
			__raw_write##f(get_unaligned(buf++), addr);	\
		} while (--count);					\
	}								\
}

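/*
 * Same pattern for the 8-, 16- and 32-bit writers.
 */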
#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)

/*
 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
 * Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
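
/*
 * Illustrative sketch only (not part of this change): a hypothetical
 * driver fragment using the string accessors defined above.  The
 * function and parameter names are invented; readsl()/writesl() move
 * 'words' 32-bit values through the same MMIO address.
 */
static inline void demo_fifo_xfer(void __iomem *fifo, u32 *rx,
				  const u32 *tx, unsigned int words)
{
	readsl(fifo, rx, words);	/* drain device FIFO into rx[] */
	writesl(fifo, tx, words);	/* push tx[] out through the FIFO */
}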

/*
 * Relaxed API for drivers which can handle barrier ordering themselves