Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e35f483 authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Linus Torvalds
Browse files

mm, hugetlb: use vma_resv_map() map types



Until now, we have gotten a resv_map in two ways according to the mapping type.
This makes the code dirty and unreadable.  Unify it.

[davidlohr@hp.com: code cleanups]
Signed-off-by: default avatarJoonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: default avatarDavidlohr Bueso <davidlohr@hp.com>
Reviewed-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: default avatarNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f031dd27
Loading
Loading
Loading
Loading
+45 −50
Original line number Original line Diff line number Diff line
@@ -419,13 +419,24 @@ void resv_map_release(struct kref *ref)
	kfree(resv_map);
	kfree(resv_map);
}
}


/* Reservation map for a hugetlbfs inode, stored in its address_space's
 * private_data.  Shared (VM_MAYSHARE) mappings of the file all resolve
 * to this one map (see vma_resv_map()). */
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
							~HPAGE_RESV_MASK);
	return NULL;
	}
}
}


static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
@@ -1167,49 +1178,35 @@ static void return_unused_surplus_pages(struct hstate *h,
static long vma_needs_reservation(struct hstate *h,
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
			struct vm_area_struct *vma, unsigned long addr)
{
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct resv_map *resv;
	struct inode *inode = mapping->host;
	pgoff_t idx;

	long chg;
	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = inode->i_mapping->private_data;

		return region_chg(resv, idx, idx + 1);


	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
	resv = vma_resv_map(vma);
	if (!resv)
		return 1;
		return 1;


	} else  {
	idx = vma_hugecache_offset(h, vma, addr);
		long err;
	chg = region_chg(resv, idx, idx + 1);
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = vma_resv_map(vma);


		err = region_chg(resv, idx, idx + 1);
	if (vma->vm_flags & VM_MAYSHARE)
		if (err < 0)
		return chg;
			return err;
	else
		return 0;
		return chg < 0 ? chg : 0;
	}
}
}
static void vma_commit_reservation(struct hstate *h,
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
			struct vm_area_struct *vma, unsigned long addr)
{
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct resv_map *resv;
	struct inode *inode = mapping->host;
	pgoff_t idx;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *resv = inode->i_mapping->private_data;

		region_add(resv, idx, idx + 1);


	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
	resv = vma_resv_map(vma);
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
	if (!resv)
		struct resv_map *resv = vma_resv_map(vma);
		return;


		/* Mark this page used in the map. */
	idx = vma_hugecache_offset(h, vma, addr);
	region_add(resv, idx, idx + 1);
	region_add(resv, idx, idx + 1);
}
}
}


static struct page *alloc_huge_page(struct vm_area_struct *vma,
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
				    unsigned long addr, int avoid_reserve)
@@ -2271,7 +2268,7 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
	 * after this open call completes.  It is therefore safe to take a
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 * new reference here without additional locking.
	 */
	 */
	if (resv)
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_get(&resv->refs);
		kref_get(&resv->refs);
}
}


@@ -2280,16 +2277,15 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
	struct hstate *h = hstate_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve;
	unsigned long reserve, start, end;
	unsigned long start;

	unsigned long end;
	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;


	if (resv) {
	start = vma_hugecache_offset(h, vma, vma->vm_start);
	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);
	end = vma_hugecache_offset(h, vma, vma->vm_end);


		reserve = (end - start) -
	reserve = (end - start) - region_count(resv, start, end);
			region_count(resv, start, end);


	kref_put(&resv->refs, resv_map_release);
	kref_put(&resv->refs, resv_map_release);


@@ -2298,7 +2294,6 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
		hugepage_subpool_put_pages(spool, reserve);
		hugepage_subpool_put_pages(spool, reserve);
	}
	}
}
}
}


/*
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
@@ -3189,7 +3184,7 @@ int hugetlb_reserve_pages(struct inode *inode,
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode->i_mapping->private_data;
		resv_map = inode_resv_map(inode);


		chg = region_chg(resv_map, from, to);
		chg = region_chg(resv_map, from, to);


@@ -3248,7 +3243,7 @@ int hugetlb_reserve_pages(struct inode *inode,
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
{
	struct hstate *h = hstate_inode(inode);
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode->i_mapping->private_data;
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);