Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87d9ac71 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "10 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: slab: free kmem_cache_node after destroy sysfs file
  ipc/shm: handle removed segments gracefully in shm_mmap()
  MAINTAINERS: update Kselftest Framework mailing list
  devm_memremap_release(): fix memremap'd addr handling
  mm/hugetlb.c: fix incorrect proc nr_hugepages value
  mm, x86: fix pte_page() crash in gup_pte_range()
  fsnotify: turn fsnotify reaper thread into a workqueue job
  Revert "fsnotify: destroy marks with call_srcu instead of dedicated thread"
  mm: fix regression in remap_file_pages() emulation
  thp, dax: do not try to withdraw pgtable from non-anon VMA
parents 23300f65 52b4b950
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -6128,7 +6128,7 @@ F: include/uapi/linux/sunrpc/


KERNEL SELFTEST FRAMEWORK
KERNEL SELFTEST FRAMEWORK
M:	Shuah Khan <shuahkh@osg.samsung.com>
M:	Shuah Khan <shuahkh@osg.samsung.com>
L:	linux-api@vger.kernel.org
L:	linux-kselftest@vger.kernel.org
T:	git git://git.kernel.org/pub/scm/shuah/linux-kselftest
T:	git git://git.kernel.org/pub/scm/shuah/linux-kselftest
S:	Maintained
S:	Maintained
F:	tools/testing/selftests
F:	tools/testing/selftests
+1 −1
Original line number Original line Diff line number Diff line
@@ -102,7 +102,6 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
			return 0;
			return 0;
		}
		}


		page = pte_page(pte);
		if (pte_devmap(pte)) {
		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
			if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
			return 0;
			return 0;
		}
		}
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		get_page(page);
		put_dev_pagemap(pgmap);
		put_dev_pagemap(pgmap);
		SetPageReferenced(page);
		SetPageReferenced(page);
+39 −14
Original line number Original line Diff line number Diff line
@@ -91,7 +91,14 @@
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include "fsnotify.h"


#define FSNOTIFY_REAPER_DELAY	(1)	/* 1 jiffy */

struct srcu_struct fsnotify_mark_srcu;
struct srcu_struct fsnotify_mark_srcu;
static DEFINE_SPINLOCK(destroy_lock);
static LIST_HEAD(destroy_list);

static void fsnotify_mark_destroy(struct work_struct *work);
static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);


void fsnotify_get_mark(struct fsnotify_mark *mark)
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
{
@@ -165,19 +172,10 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
	atomic_dec(&group->num_marks);
	atomic_dec(&group->num_marks);
}
}


static void
fsnotify_mark_free_rcu(struct rcu_head *rcu)
{
	struct fsnotify_mark	*mark;

	mark = container_of(rcu, struct fsnotify_mark, g_rcu);
	fsnotify_put_mark(mark);
}

/*
/*
 * Free fsnotify mark. The freeing is actually happening from a call_srcu
 * Free fsnotify mark. The freeing is actually happening from a kthread which
 * callback. Caller must have a reference to the mark or be protected by
 * first waits for srcu period end. Caller must have a reference to the mark
 * fsnotify_mark_srcu.
 * or be protected by fsnotify_mark_srcu.
 */
 */
void fsnotify_free_mark(struct fsnotify_mark *mark)
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
{
@@ -192,7 +190,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark)
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
	spin_unlock(&mark->lock);
	spin_unlock(&mark->lock);


	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
				FSNOTIFY_REAPER_DELAY);


	/*
	/*
	 * Some groups like to know that marks are being freed.  This is a
	 * Some groups like to know that marks are being freed.  This is a
@@ -388,7 +390,12 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark,


	spin_unlock(&mark->lock);
	spin_unlock(&mark->lock);


	call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
	spin_lock(&destroy_lock);
	list_add(&mark->g_list, &destroy_list);
	spin_unlock(&destroy_lock);
	queue_delayed_work(system_unbound_wq, &reaper_work,
				FSNOTIFY_REAPER_DELAY);

	return ret;
	return ret;
}
}


@@ -491,3 +498,21 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,
	atomic_set(&mark->refcnt, 1);
	atomic_set(&mark->refcnt, 1);
	mark->free_mark = free_mark;
	mark->free_mark = free_mark;
}
}

/*
 * Workqueue reaper for marks queued on destroy_list by fsnotify_free_mark()
 * and the fsnotify_add_mark_locked() failure path.  Detach the global list
 * under destroy_lock, wait for one SRCU grace period so no reader holding
 * fsnotify_mark_srcu can still see the marks, then drop each mark's
 * reference (presumably the final one, freeing the mark — confirm against
 * fsnotify_put_mark()).
 */
static void fsnotify_mark_destroy(struct work_struct *work)
{
	struct fsnotify_mark *mark, *next;
	struct list_head private_destroy_list;

	spin_lock(&destroy_lock);
	/* exchange the list head */
	list_replace_init(&destroy_list, &private_destroy_list);
	spin_unlock(&destroy_lock);

	/*
	 * Marks are now only on our private list, so new queuers can't touch
	 * them; wait out existing SRCU readers before putting the references.
	 */
	synchronize_srcu(&fsnotify_mark_srcu);

	list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
		list_del_init(&mark->g_list);
		fsnotify_put_mark(mark);
	}
}
+1 −4
Original line number Original line Diff line number Diff line
@@ -220,10 +220,7 @@ struct fsnotify_mark {
	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
	/* List of marks by group->i_fsnotify_marks. Also reused for queueing
	 * mark into destroy_list when it's waiting for the end of SRCU period
	 * mark into destroy_list when it's waiting for the end of SRCU period
	 * before it can be freed. [group->mark_mutex] */
	 * before it can be freed. [group->mark_mutex] */
	union {
	struct list_head g_list;
	struct list_head g_list;
		struct rcu_head g_rcu;
	};
	/* Protects inode / mnt pointers, flags, masks */
	/* Protects inode / mnt pointers, flags, masks */
	spinlock_t lock;
	spinlock_t lock;
	/* List of marks for inode / vfsmount [obj_lock] */
	/* List of marks for inode / vfsmount [obj_lock] */
+43 −10
Original line number Original line Diff line number Diff line
@@ -156,11 +156,12 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);


	/*
	/*
	 * We raced in the idr lookup or with shm_destroy().  Either way, the
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * ID is busted.
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	 */
	WARN_ON(IS_ERR(ipcp));
	if (IS_ERR(ipcp))

		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}
}


@@ -186,18 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
}
}




/* This is called by fork, once for every shm attach. */
static int __shm_open(struct vm_area_struct *vma)
static void shm_open(struct vm_area_struct *vma)
{
{
	struct file *file = vma->vm_file;
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct shmid_kernel *shp;


	shp = shm_lock(sfd->ns, sfd->id);
	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shp->shm_nattch++;
	shm_unlock(shp);
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 *
	 * NOTE(review): vm_operations_struct::open returns void, so the
	 * lookup failure from __shm_open() cannot be propagated here;
	 * warning once is the only option on this path.
	 */
	WARN_ON_ONCE(err);
}
}


/*
/*
@@ -260,6 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
	down_write(&shm_ids(ns).rwsem);
	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	shp->shm_nattch--;
@@ -267,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
		shm_destroy(ns, shp);
		shm_destroy(ns, shp);
	else
	else
		shm_unlock(shp);
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
	up_write(&shm_ids(ns).rwsem);
}
}


@@ -388,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
	struct shm_file_data *sfd = shm_file_data(file);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;
	int ret;


	/*
	 * In case of remap_file_pages() emulation, the file can represent
	 * removed IPC ID: propogate shm_lock() error to caller.
	 */
	ret =__shm_open(vma);
	if (ret)
		return ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
	if (ret) {
		shm_close(vma);
		return ret;
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
	WARN_ON(!sfd->vm_ops->fault);
#endif
#endif
	vma->vm_ops = &shm_vm_ops;
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);
	return 0;

	return ret;
}
}


static int shm_release(struct inode *ino, struct file *file)
static int shm_release(struct inode *ino, struct file *file)
Loading