
Commit 846a16bf authored by Lee Schermerhorn, committed by Linus Torvalds

mempolicy: rename mpol_copy to mpol_dup



This patch renames mpol_copy() to mpol_dup() because, well, that's what it
does.  Like strdup() for strings, mpol_dup() takes a pointer to an existing
mempolicy, allocates a new one, and copies the contents.

In a later patch, I want to use the name mpol_copy() to copy the contents from
one mempolicy to another, as strcpy() does for strings.

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f0be3d32
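
To make the strdup()/strcpy() analogy above concrete, here is a minimal userspace sketch; the struct policy type and the pol_dup()/pol_copy() helpers are invented for illustration and are not the kernel's mempolicy API:

/*
 * Minimal userspace sketch (not the kernel code): the struct and the
 * pol_dup()/pol_copy() names below are made up to mirror the
 * strdup()/strcpy() distinction the commit message describes.
 */
#include <stdio.h>
#include <stdlib.h>

struct policy {
	int refcnt;
	int mode;
};

/* "dup": allocate fresh storage and copy into it, like strdup() */
static struct policy *pol_dup(const struct policy *old)
{
	struct policy *new = malloc(sizeof(*new));

	if (!new)
		return NULL;
	*new = *old;
	new->refcnt = 1;	/* the one reference belongs to the caller */
	return new;
}

/* "copy": overwrite existing storage, like strcpy(); this is the
 * meaning the later patch wants to reserve the name mpol_copy() for */
static void pol_copy(struct policy *dst, const struct policy *src)
{
	dst->mode = src->mode;	/* copy contents, keep dst's own refcnt */
}

int main(void)
{
	struct policy orig = { .refcnt = 1, .mode = 2 };
	struct policy other = { .refcnt = 1, .mode = 0 };
	struct policy *dup = pol_dup(&orig);	/* new object, refcount 1 */

	pol_copy(&other, &orig);		/* existing object updated in place */
	printf("dup mode=%d, other mode=%d\n", dup ? dup->mode : -1, other.mode);
	free(dup);
	return 0;
}

The refcount handling in the sketch mirrors the header comment in the first hunk below: the duplicate starts with one reference, held by the caller that asked for it.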
+7 −7
@@ -73,10 +73,10 @@ struct mm_struct;
  * Mempolicy objects are reference counted.  A mempolicy will be freed when
  * mpol_put() decrements the reference count to zero.
  *
- * Copying policy objects:
- * mpol_copy() allocates a new mempolicy and copies the specified mempolicy
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
  * to the new storage.  The reference count of the new object is initialized
- * to 1, representing the caller of mpol_copy().
+ * to 1, representing the caller of mpol_dup().
  */
 struct mempolicy {
 	atomic_t refcnt;
@@ -105,11 +105,11 @@ static inline void mpol_put(struct mempolicy *pol)
 		__mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
 	if (pol)
-		pol = __mpol_copy(pol);
+		pol = __mpol_dup(pol);
 	return pol;
 }
 
@@ -198,7 +198,7 @@ static inline void mpol_get(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
 {
 	return NULL;
 }
+2 −2
@@ -941,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	 * rebind the vma mempolicies of each mm in mmarray[] to their
 	 * new cpuset, and release that mm.  The mpol_rebind_mm()
 	 * call takes mmap_sem, which we couldn't take while holding
-	 * tasklist_lock.  Forks can happen again now - the mpol_copy()
+	 * tasklist_lock.  Forks can happen again now - the mpol_dup()
 	 * cpuset_being_rebound check will catch such forks, and rebind
 	 * their vma mempolicies too.  Because we still hold the global
 	 * cgroup_mutex, we know that no other rebind effort will
+2 −2
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (!tmp)
 			goto fail_nomem;
 		*tmp = *mpnt;
-		pol = mpol_copy(vma_policy(mpnt));
+		pol = mpol_dup(vma_policy(mpnt));
 		retval = PTR_ERR(pol);
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
@@ -1116,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->audit_context = NULL;
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
- 	p->mempolicy = mpol_copy(p->mempolicy);
+	p->mempolicy = mpol_dup(p->mempolicy);
  	if (IS_ERR(p->mempolicy)) {
  		retval = PTR_ERR(p->mempolicy);
  		p->mempolicy = NULL;
+3 −3
@@ -1566,15 +1566,15 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 EXPORT_SYMBOL(alloc_pages_current);
 
 /*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
  * rebinds the mempolicy its copying by calling mpol_rebind_policy()
  * with the mems_allowed returned by cpuset_mems_allowed().  This
  * keeps mempolicies cpuset relative after its cpuset moves.  See
  * further kernel/cpuset.c update_nodemask().
  */
 
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
 {
 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 
+2 −2
@@ -1810,7 +1810,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
-	pol = mpol_copy(vma_policy(vma));
+	pol = mpol_dup(vma_policy(vma));
 	if (IS_ERR(pol)) {
 		kmem_cache_free(vm_area_cachep, new);
 		return PTR_ERR(pol);
@@ -2126,7 +2126,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 		if (new_vma) {
 			*new_vma = *vma;
-			pol = mpol_copy(vma_policy(vma));
+			pol = mpol_dup(vma_policy(vma));
 			if (IS_ERR(pol)) {
 				kmem_cache_free(vm_area_cachep, new_vma);
 				return NULL;