
Commit c40f6f8b authored by Linus Torvalds
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-nommu:
  NOMMU: Support XIP on initramfs
  NOMMU: Teach kobjsize() about VMA regions.
  FLAT: Don't attempt to expand the userspace stack to fill the space allocated
  FDPIC: Don't attempt to expand the userspace stack to fill the space allocated
  NOMMU: Improve procfs output using per-MM VMAs
  NOMMU: Make mmap allocation page trimming behaviour configurable.
  NOMMU: Make VMAs per MM as for MMU-mode linux
  NOMMU: Delete askedalloc and realalloc variables
  NOMMU: Rename ARM's struct vm_region
  NOMMU: Fix cleanup handling in ramfs_nommu_get_unmapped_area()
parents 1a7d0f0b cb6ff208
Documentation/nommu-mmap.txt  +26 −5
@@ -109,12 +109,18 @@ and it's also much more restricted in the latter case:
 FURTHER NOTES ON NO-MMU MMAP
 ============================
 
- (*) A request for a private mapping of less than a page in size may not return
-     a page-aligned buffer. This is because the kernel calls kmalloc() to
-     allocate the buffer, not get_free_page().
+ (*) A request for a private mapping of a file may return a buffer that is not
+     page-aligned.  This is because XIP may take place, and the data may not be
+     page aligned in the backing store.
 
- (*) A list of all the mappings on the system is visible through /proc/maps in
-     no-MMU mode.
+ (*) A request for an anonymous mapping will always be page aligned.  If
+     possible the size of the request should be a power of two, otherwise some
+     of the space may be wasted as the kernel must allocate a power-of-2
+     granule but will only discard the excess if appropriately configured as
+     this has an effect on fragmentation.
+
+ (*) A list of all the private copy and anonymous mappings on the system is
+     visible through /proc/maps in no-MMU mode.
 
  (*) A list of all the mappings in use by a process is visible through
      /proc/<pid>/maps in no-MMU mode.
@@ -242,3 +248,18 @@ PROVIDING SHAREABLE BLOCK DEVICE SUPPORT
 Provision of shared mappings on block device files is exactly the same as for
 character devices. If there isn't a real device underneath, then the driver
 should allocate sufficient contiguous memory to honour any supported mapping.
+
+
+=================================
+ADJUSTING PAGE TRIMMING BEHAVIOUR
+=================================
+
+NOMMU mmap automatically rounds up to the nearest power-of-2 number of pages
+when performing an allocation.  This can have adverse effects on memory
+fragmentation, and as such, is left configurable.  The default behaviour is to
+aggressively trim allocations and discard any excess pages back into the page
+allocator.  In order to retain finer-grained control over fragmentation, this
+behaviour can either be disabled completely, or bumped up to a higher page
+watermark where trimming begins.
+
+Page trimming behaviour is configurable via the sysctl `vm.nr_trim_pages'.
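
As a quick illustration of the sizing advice documented above, a user-space
program on a no-MMU kernel might request an anonymous mapping like this.  This
is a minimal sketch, not part of the patch; the four-page length assumes 4KB
pages and is chosen as a power of two so that the kernel's power-of-2 granule
is consumed exactly and there is no excess for the trimming logic to handle:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/mman.h>

	int main(void)
	{
		/* Four pages = 16KB on a 4KB-page system: a power-of-2
		 * length, so the power-of-2 granule the no-MMU kernel
		 * allocates is fully used and nothing is wasted. */
		size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);

		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}

		/* Per the notes above, an anonymous mapping is always
		 * page aligned, even on a no-MMU kernel. */
		printf("mapped %zu bytes at %p\n", len, p);

		munmap(p, len);
		return 0;
	}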
Documentation/sysctl/vm.txt  +18 −0
@@ -38,6 +38,7 @@ Currently, these files are in /proc/sys/vm:
 - numa_zonelist_order
 - nr_hugepages
 - nr_overcommit_hugepages
+- nr_trim_pages		(only if CONFIG_MMU=n)
 
 ==============================================================
 
@@ -348,3 +349,20 @@ Change the maximum size of the hugepage pool. The maximum is
 nr_hugepages + nr_overcommit_hugepages.
 
 See Documentation/vm/hugetlbpage.txt
+
+==============================================================
+
+nr_trim_pages
+
+This is available only on NOMMU kernels.
+
+This value adjusts the excess page trimming behaviour of power-of-2 aligned
+NOMMU mmap allocations.
+
+A value of 0 disables trimming of allocations entirely, while a value of 1
+trims excess pages aggressively. Any value >= 1 acts as the watermark where
+trimming of allocations is initiated.
+
+The default value is 1.
+
+See Documentation/nommu-mmap.txt for more information.
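
For example, the watermark can be set from user space by writing the procfs
file that backs this sysctl.  This is a minimal sketch, not part of the patch;
choosing 0 here is only an example, disabling trimming so whole power-of-2
granules are kept to limit fragmentation:

	#include <stdio.h>

	/* Adjust vm.nr_trim_pages on a no-MMU kernel via procfs.
	 * 0 disables trimming entirely; any value >= 1 is the watermark
	 * at which excess pages of a power-of-2 allocation are returned
	 * to the page allocator (1, the default, trims aggressively). */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/nr_trim_pages", "w");
		if (!f) {
			perror("/proc/sys/vm/nr_trim_pages");
			return 1;
		}
		fprintf(f, "0\n");	/* example: keep whole granules */
		fclose(f);
		return 0;
	}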
arch/arm/include/asm/mmu.h  +0 −1
@@ -24,7 +24,6 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-	struct vm_list_struct	*vmlist;
 	unsigned long		end_brk;
 } mm_context_t;
 
arch/arm/mm/dma-mapping.c  +14 −14
@@ -71,7 +71,7 @@ static DEFINE_SPINLOCK(consistent_lock);
  * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
  * would have to initialise this each time prior to calling vm_region_alloc().
  */
-struct vm_region {
+struct arm_vm_region {
 	struct list_head	vm_list;
 	unsigned long		vm_start;
 	unsigned long		vm_end;
@@ -79,20 +79,20 @@ struct vm_region {
 	int			vm_active;
 };
 
-static struct vm_region consistent_head = {
+static struct arm_vm_region consistent_head = {
 	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
 	.vm_start	= CONSISTENT_BASE,
 	.vm_end		= CONSISTENT_END,
 };
 
-static struct vm_region *
-vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
+static struct arm_vm_region *
+arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
 {
 	unsigned long addr = head->vm_start, end = head->vm_end - size;
 	unsigned long flags;
-	struct vm_region *c, *new;
+	struct arm_vm_region *c, *new;
 
-	new = kmalloc(sizeof(struct vm_region), gfp);
+	new = kmalloc(sizeof(struct arm_vm_region), gfp);
 	if (!new)
 		goto out;
 
@@ -127,9 +127,9 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
 	return NULL;
 }
 
-static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
+static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 
 	list_for_each_entry(c, &head->vm_list, vm_list) {
 		if (c->vm_active && c->vm_start == addr)
@@ -149,7 +149,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	    pgprot_t prot)
 {
 	struct page *page;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
@@ -214,7 +214,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	/*
 	 * Allocate a virtual address in the consistent mapping region.
 	 */
-	c = vm_region_alloc(&consistent_head, size,
+	c = arm_vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
 		pte_t *pte;
@@ -311,13 +311,13 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
 	unsigned long flags, user_size, kern_size;
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	spin_unlock_irqrestore(&consistent_lock, flags);
 
 	if (c) {
@@ -359,7 +359,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  */
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-	struct vm_region *c;
+	struct arm_vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
 	int idx;
@@ -378,7 +378,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	size = PAGE_ALIGN(size);
 
 	spin_lock_irqsave(&consistent_lock, flags);
-	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
 	if (!c)
 		goto no_area;
 
arch/blackfin/include/asm/mmu.h  +0 −1
@@ -10,7 +10,6 @@ struct sram_list_struct {
 };
 
 typedef struct {
-	struct vm_list_struct *vmlist;
 	unsigned long end_brk;
 	unsigned long stack_start;
 