Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 27ba0644 authored by Kirill A. Shutemov, committed by Linus Torvalds
Browse files

rmap: drop support of non-linear mappings



We don't create non-linear mappings anymore.  Let's drop code which
handles them in rmap.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1da4b35b
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -317,10 +317,10 @@ maps this page at its virtual address.
	about doing this.

	The idea is, first at flush_dcache_page() time, if
	page->mapping->i_mmap is an empty tree and ->i_mmap_nonlinear
	an empty list, just mark the architecture private page flag bit.
	Later, in update_mmu_cache(), a check is made of this flag bit,
	and if set the flush is done and the flag bit is cleared.
	page->mapping->i_mmap is an empty tree, just mark the architecture
	private page flag bit.  Later, in update_mmu_cache(), a check is
	made of this flag bit, and if set the flush is done and the flag
	bit is cleared.

	IMPORTANT NOTE: It is often important, if you defer the flush,
			that the actual flush occurs on the same CPU
+0 −1
Original line number Diff line number Diff line
@@ -355,7 +355,6 @@ void address_space_init_once(struct address_space *mapping)
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

+1 −3
Original line number Diff line number Diff line
@@ -401,7 +401,6 @@ struct address_space {
	spinlock_t		tree_lock;	/* and lock protecting it */
	atomic_t		i_mmap_writable;/* count VM_SHARED mappings */
	struct rb_root		i_mmap;		/* tree of private and shared mappings */
	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
	struct rw_semaphore	i_mmap_rwsem;	/* protect tree, count, list */
	/* Protected by tree_lock together with the radix tree */
	unsigned long		nrpages;	/* number of total pages */
@@ -493,8 +492,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
 */
static inline int mapping_mapped(struct address_space *mapping)
{
	return	!RB_EMPTY_ROOT(&mapping->i_mmap) ||
		!list_empty(&mapping->i_mmap_nonlinear);
	return	!RB_EMPTY_ROOT(&mapping->i_mmap);
}

/*
+0 −6
Original line number Diff line number Diff line
@@ -1796,12 +1796,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
	for (vma = vma_interval_tree_iter_first(root, start, last);	\
	     vma; vma = vma_interval_tree_iter_next(vma, start, last))

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	list_add_tail(&vma->shared.nonlinear, list);
}

void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
				   struct rb_root *root);
void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+1 −3
Original line number Diff line number Diff line
@@ -273,15 +273,13 @@ struct vm_area_struct {

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 * linkage into the address_space->i_mmap interval tree.
	 */
	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} linear;
		struct list_head nonlinear;
	} shared;

	/*
Loading