
Commit d2ffb010 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge fixes from Andrew Morton:
 "20 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  rapidio/rio_cm: avoid GFP_KERNEL in atomic context
  Revert "ocfs2: bump up o2cb network protocol version"
  ocfs2: fix start offset to ocfs2_zero_range_for_truncate()
  cgroup: duplicate cgroup reference when cloning sockets
  mm: memcontrol: make per-cpu charge cache IRQ-safe for socket accounting
  ocfs2: fix double unlock in case retry after free truncate log
  fanotify: fix list corruption in fanotify_get_response()
  fsnotify: add a way to stop queueing events on group shutdown
  ocfs2: fix trans extend while free cached blocks
  ocfs2: fix trans extend while flush truncate log
  ipc/shm: fix crash if CONFIG_SHMEM is not set
  mm: fix the page_swap_info() BUG_ON check
  autofs: use dentry flags to block walks during expire
  MAINTAINERS: update email for VLYNQ bus entry
  mm: avoid endless recursion in dump_page()
  mm, thp: fix leaking mapped pte in __collapse_huge_page_swapin()
  khugepaged: fix use-after-free in collapse_huge_page()
  MAINTAINERS: Maik has moved
  ocfs2/dlm: fix race between convert and migration
  mem-hotplug: don't clear the only node in new_node_page()
parents 7fadce0d b92ae139
MAINTAINERS  +2 −2
@@ -6103,7 +6103,7 @@ S:	Supported
 F:	drivers/cpufreq/intel_pstate.c
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
-M:	Maik Broemme <mbroemme@plusserver.de>
+M:	Maik Broemme <mbroemme@libmpq.org>
 L:	linux-fbdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/fb/intelfb.txt
@@ -12569,7 +12569,7 @@ F:	include/linux/if_*vlan.h
 F:	net/8021q/
 
 VLYNQ BUS
-M:	Florian Fainelli <florian@openwrt.org>
+M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c
drivers/rapidio/rio_cm.c  +16 −3
@@ -2247,17 +2247,30 @@ static int rio_cm_shutdown(struct notifier_block *nb, unsigned long code,
 {
 	struct rio_channel *ch;
 	unsigned int i;
+	LIST_HEAD(list);
 
 	riocm_debug(EXIT, ".");
 
+	/*
+	 * If there are any channels left in connected state send
+	 * close notification to the connection partner.
+	 * First build a list of channels that require a closing
+	 * notification because function riocm_send_close() should
+	 * be called outside of spinlock protected code.
+	 */
 	spin_lock_bh(&idr_lock);
 	idr_for_each_entry(&ch_idr, ch, i) {
-		riocm_debug(EXIT, "close ch %d", ch->id);
-		if (ch->state == RIO_CM_CONNECTED)
-			riocm_send_close(ch);
+		if (ch->state == RIO_CM_CONNECTED) {
+			riocm_debug(EXIT, "close ch %d", ch->id);
+			idr_remove(&ch_idr, ch->id);
+			list_add(&ch->ch_node, &list);
+		}
 	}
 	spin_unlock_bh(&idr_lock);
 
+	list_for_each_entry(ch, &list, ch_node)
+		riocm_send_close(ch);
+
 	return NOTIFY_DONE;
 }
 
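The problem being fixed is that riocm_send_close() can end up sleeping (the patch is titled "avoid GFP_KERNEL in atomic context"), yet the old code called it under the BH-disabled idr_lock. The fix uses a common two-phase shape, spelled out in the new comment: while the spinlock is held, connected channels are only unlinked onto a private list; the blocking riocm_send_close() calls happen afterwards, with the lock dropped. Below is a minimal userspace sketch of that shape using pthreads; the channel structure, send_close() and shutdown_channels() names are invented for illustration and are not the rio_cm code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins: a lock-protected table of "channels" and a
 * close notification that may block, so it must run with the lock dropped. */
struct channel {
	int id;
	int connected;
	struct channel *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct channel *table;

static void send_close(struct channel *ch)
{
	/* may sleep or allocate in a real driver; safe only without the lock */
	printf("close ch %d\n", ch->id);
}

static void shutdown_channels(void)
{
	struct channel *ch, **pp, *pending = NULL;

	/* Phase 1: under the lock, unlink connected channels onto a
	 * private list instead of notifying them in place. */
	pthread_mutex_lock(&table_lock);
	pp = &table;
	while ((ch = *pp) != NULL) {
		if (ch->connected) {
			*pp = ch->next;
			ch->next = pending;
			pending = ch;
		} else {
			pp = &ch->next;
		}
	}
	pthread_mutex_unlock(&table_lock);

	/* Phase 2: the lock is dropped, so the blocking call is now safe. */
	while ((ch = pending) != NULL) {
		pending = ch->next;
		send_close(ch);
		free(ch);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct channel *ch = calloc(1, sizeof(*ch));
		ch->id = i;
		ch->connected = (i != 1);
		ch->next = table;
		table = ch;
	}

	shutdown_channels();

	while (table) {			/* free the non-connected leftovers */
		struct channel *ch = table;
		table = ch->next;
		free(ch);
	}
	return 0;
}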
fs/autofs4/expire.c  +42 −13
@@ -417,6 +417,7 @@ static struct dentry *should_expire(struct dentry *dentry,
 	}
 	return NULL;
 }
+
 /*
  * Find an eligible tree to time-out
  * A tree is eligible if :-
@@ -432,6 +433,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 	struct dentry *root = sb->s_root;
 	struct dentry *dentry;
 	struct dentry *expired;
+	struct dentry *found;
 	struct autofs_info *ino;
 
 	if (!root)
@@ -442,31 +444,46 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 
 	dentry = NULL;
 	while ((dentry = get_next_positive_subdir(dentry, root))) {
+		int flags = how;
+
 		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(dentry);
-		if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
-			expired = NULL;
-		else
-			expired = should_expire(dentry, mnt, timeout, how);
-		if (!expired) {
+		if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
 			spin_unlock(&sbi->fs_lock);
 			continue;
 		}
+		spin_unlock(&sbi->fs_lock);
+
+		expired = should_expire(dentry, mnt, timeout, flags);
+		if (!expired)
+			continue;
+
+		spin_lock(&sbi->fs_lock);
 		ino = autofs4_dentry_ino(expired);
 		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
 		spin_unlock(&sbi->fs_lock);
 		synchronize_rcu();
-		spin_lock(&sbi->fs_lock);
-		if (should_expire(expired, mnt, timeout, how)) {
-			if (expired != dentry)
-				dput(dentry);
-			goto found;
-		}
 
-		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+		/* Make sure a reference is not taken on found if
+		 * things have changed.
+		 */
+		flags &= ~AUTOFS_EXP_LEAVES;
+		found = should_expire(expired, mnt, timeout, how);
+		if (!found || found != expired)
+			/* Something has changed, continue */
+			goto next;
+
+		if (expired != dentry)
+			dput(dentry);
+
+		spin_lock(&sbi->fs_lock);
+		goto found;
+next:
+		spin_lock(&sbi->fs_lock);
+		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+		spin_unlock(&sbi->fs_lock);
 		if (expired != dentry)
 			dput(expired);
-		spin_unlock(&sbi->fs_lock);
 	}
 	return NULL;
 
@@ -483,6 +500,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
 	int status;
+	int state;
 
 	/* Block on any pending expire */
 	if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
@@ -490,8 +508,19 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
 	if (rcu_walk)
 		return -ECHILD;
 
+retry:
 	spin_lock(&sbi->fs_lock);
-	if (ino->flags & AUTOFS_INF_EXPIRING) {
+	state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
+	if (state == AUTOFS_INF_WANT_EXPIRE) {
+		spin_unlock(&sbi->fs_lock);
+		/*
+		 * Possibly being selected for expire, wait until
+		 * it's selected or not.
+		 */
+		schedule_timeout_uninterruptible(HZ/10);
+		goto retry;
+	}
+	if (state & AUTOFS_INF_EXPIRING) {
 		spin_unlock(&sbi->fs_lock);
 
 		pr_debug("waiting for expire %p name=%pd\n", dentry, dentry);
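The expire path now marks a candidate with AUTOFS_INF_WANT_EXPIRE, runs synchronize_rcu(), and re-checks it before committing, while autofs4_expire_wait() gains a retry loop: a walker that sees WANT_EXPIRE without EXPIRING backs off for HZ/10 and looks again, and only blocks once EXPIRING is actually set. A small userspace sketch of that wait/back-off protocol follows; the flag values, the msleep() helper and the walker()/main() driver are invented for the sketch and are not the autofs code.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Illustrative analogue of the two-stage expire state. */
#define WANT_EXPIRE 0x1		/* candidate selected, not yet final */
#define EXPIRING    0x2		/* expire committed, walkers must wait */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t expire_done = PTHREAD_COND_INITIALIZER;
static int flags;
static int finished;

static void msleep(long ms)
{
	struct timespec ts = { .tv_sec = ms / 1000,
			       .tv_nsec = (ms % 1000) * 1000000L };
	nanosleep(&ts, NULL);
}

/* Walker side: mirrors the retry loop added to autofs4_expire_wait().
 * If the object is only *wanted* for expire, back off briefly and look
 * again; once the expire is committed, sleep until it completes. */
static void *walker(void *unused)
{
	(void)unused;
retry:
	pthread_mutex_lock(&lock);
	if (flags == WANT_EXPIRE) {
		pthread_mutex_unlock(&lock);
		msleep(100);		/* analogous to HZ/10 */
		goto retry;
	}
	if (flags & EXPIRING) {
		printf("waiting for expire\n");
		while (!finished)
			pthread_cond_wait(&expire_done, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("walk continues\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	flags = WANT_EXPIRE;		/* expire code picked a candidate */
	pthread_create(&t, NULL, walker, NULL);

	msleep(300);			/* walker polls a few times */
	pthread_mutex_lock(&lock);
	flags |= EXPIRING;		/* candidate confirmed, really expiring */
	pthread_mutex_unlock(&lock);

	msleep(200);			/* the expire itself */
	pthread_mutex_lock(&lock);
	finished = 1;
	flags = 0;
	pthread_cond_broadcast(&expire_done);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}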
fs/notify/fanotify/fanotify.c  +1 −12
@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-	wait_event(group->fanotify_data.access_waitq, event->response ||
-				atomic_read(&group->fanotify_data.bypass_perm));
-
-	if (!event->response) {	/* bypass_perm set */
-		/*
-		 * Event was canceled because group is being destroyed. Remove
-		 * it from group's event list because we are responsible for
-		 * freeing the permission event.
-		 */
-		fsnotify_remove_event(group, &event->fae.fse);
-		return 0;
-	}
+	wait_event(group->fanotify_data.access_waitq, event->response);
 
 	/* userspace responded, convert to something usable */
 	switch (event->response) {
fs/notify/fanotify/fanotify_user.c  +24 −12
@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
 	struct fanotify_perm_event_info *event, *next;
+	struct fsnotify_event *fsn_event;
 
 	/*
-	 * There may be still new events arriving in the notification queue
-	 * but since userspace cannot use fanotify fd anymore, no event can
-	 * enter or leave access_list by now.
+	 * Stop new events from arriving in the notification queue. since
+	 * userspace cannot use fanotify fd anymore, no event can enter or
+	 * leave access_list by now either.
 	 */
-	spin_lock(&group->fanotify_data.access_lock);
+	fsnotify_group_stop_queueing(group);
 
-	atomic_inc(&group->fanotify_data.bypass_perm);
-
+	/*
+	 * Process all permission events on access_list and notification queue
+	 * and simulate reply from userspace.
+	 */
+	spin_lock(&group->fanotify_data.access_lock);
 	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
 				 fae.fse.list) {
 		pr_debug("%s: found group=%p event=%p\n", __func__, group,
@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 	spin_unlock(&group->fanotify_data.access_lock);
 
 	/*
-	 * Since bypass_perm is set, newly queued events will not wait for
-	 * access response. Wake up the already sleeping ones now.
-	 * synchronize_srcu() in fsnotify_destroy_group() will wait for all
-	 * processes sleeping in fanotify_handle_event() waiting for access
-	 * response and thus also for all permission events to be freed.
+	 * Destroy all non-permission events. For permission events just
+	 * dequeue them and set the response. They will be freed once the
+	 * response is consumed and fanotify_get_response() returns.
 	 */
+	mutex_lock(&group->notification_mutex);
+	while (!fsnotify_notify_queue_is_empty(group)) {
+		fsn_event = fsnotify_remove_first_event(group);
+		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
+			fsnotify_destroy_event(group, fsn_event);
+		else
+			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
+	}
+	mutex_unlock(&group->notification_mutex);
+
+	/* Response for all permission events it set, wakeup waiters */
 	wake_up(&group->fanotify_data.access_waitq);
 #endif
 
@@ -755,7 +768,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	spin_lock_init(&group->fanotify_data.access_lock);
 	init_waitqueue_head(&group->fanotify_data.access_waitq);
 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
-	atomic_set(&group->fanotify_data.bypass_perm, 0);
 #endif
 	switch (flags & FAN_ALL_CLASS_BITS) {
 	case FAN_CLASS_NOTIF:
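Together with the fanotify.c change above, the shutdown path now guarantees that every queued permission event gets a response: fsnotify_group_stop_queueing() stops new events, the queue is drained with non-permission events destroyed and permission events answered with FAN_ALLOW, and the waitqueue is woken, so fanotify_get_response() can wait on event->response alone. Below is a rough userspace analogue of that handshake, with a mutex and condition variable standing in for the kernel waitqueue; the perm_event structure and the get_response()/release_group() names are invented for the sketch and are not the fsnotify API.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

enum { RESP_NONE = 0, RESP_ALLOW = 1, RESP_DENY = 2 };

struct perm_event {
	int response;
	struct perm_event *next;
};

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  answered = PTHREAD_COND_INITIALIZER;
static struct perm_event *queue;
static int shutting_down;

/* Event side: queue the permission event, then sleep until *someone*
 * fills in a response (normally userspace, or teardown during release). */
static int get_response(struct perm_event *ev)
{
	pthread_mutex_lock(&qlock);
	if (!shutting_down) {
		ev->next = queue;
		queue = ev;
	} else {
		ev->response = RESP_ALLOW;	/* queueing already stopped */
	}
	while (ev->response == RESP_NONE)
		pthread_cond_wait(&answered, &qlock);
	pthread_mutex_unlock(&qlock);
	return ev->response;
}

/* Release side: stop queueing, answer every pending permission event,
 * then wake all waiters so none of them sleeps forever. */
static void release_group(void)
{
	struct perm_event *ev;

	pthread_mutex_lock(&qlock);
	shutting_down = 1;			/* no new events enter the queue */
	for (ev = queue; ev; ev = ev->next)
		if (ev->response == RESP_NONE)
			ev->response = RESP_ALLOW;
	queue = NULL;
	pthread_cond_broadcast(&answered);	/* wake the sleepers */
	pthread_mutex_unlock(&qlock);
}

static void *waiter(void *arg)
{
	struct perm_event *ev = arg;

	printf("event answered: %s\n",
	       get_response(ev) == RESP_ALLOW ? "allow" : "deny");
	return NULL;
}

int main(void)
{
	struct perm_event ev = { .response = RESP_NONE };
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	pthread_t t;

	pthread_create(&t, NULL, waiter, &ev);
	nanosleep(&ts, NULL);		/* let the waiter queue and block */

	release_group();		/* plays the role of fanotify_release() */
	pthread_join(t, NULL);
	return 0;
}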