Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 06d97c58 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "18 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, swap: use page-cluster as max window of VMA based swap readahead
  mm: page_vma_mapped: ensure pmd is loaded with READ_ONCE outside of lock
  kmemleak: clear stale pointers from task stacks
  fs/binfmt_misc.c: node could be NULL when evicting inode
  fs/mpage.c: fix mpage_writepage() for pages with buffers
  linux/kernel.h: add/correct kernel-doc notation
  tty: fall back to N_NULL if switching to N_TTY fails during hangup
  Revert "vmalloc: back off when the current task is killed"
  mm/cma.c: take __GFP_NOWARN into account in cma_alloc()
  scripts/kallsyms.c: ignore symbol type 'n'
  userfaultfd: selftest: exercise -EEXIST only in background transfer
  mm: only display online cpus of the numa node
  mm: remove unnecessary WARN_ONCE in page_vma_mapped_walk().
  mm/mempolicy: fix NUMA_INTERLEAVE_HIT counter
  include/linux/of.h: provide of_n_{addr,size}_cells wrappers for !CONFIG_OF
  mm/madvise.c: add description for MADV_WIPEONFORK and MADV_KEEPONFORK
  lib/Kconfig.debug: kernel hacking menu: runtime testing: keep tests together
  mm/migrate: fix indexing bug (off by one) and avoid out of bound access
parents e837d913 61b63972
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
		still used for tmpfs etc. other users.  If set to
		false, the global swap readahead algorithm will be
		used for all swappable pages.

What:		/sys/kernel/mm/swap/vma_ra_max_order
Date:		August 2017
Contact:	Linux memory management mailing list <linux-mm@kvack.org>
Description:	The max readahead size in order for VMA based swap readahead

		VMA based swap readahead algorithm will readahead at
		most 1 << max_order pages for each readahead.  The
		real readahead size for each readahead will be scaled
		according to the estimation algorithm.
+10 −2
Original line number Diff line number Diff line
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = {

/*
 * Print the set of *online* CPUs belonging to this NUMA node into @buf.
 *
 * @dev:  embedded device of the node (container_of'd back to struct node)
 * @list: true for list format ("0-3,8"), false for mask format ("0f")
 * @buf:  PAGE_SIZE sysfs output buffer
 *
 * Returns the number of characters written, or 0 if the temporary
 * cpumask could not be allocated.
 *
 * cpumask_of_node() reports all CPUs ever associated with the node,
 * including offline ones; intersect it with cpu_online_mask so sysfs
 * only shows CPUs that are actually usable.
 */
static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
{
	ssize_t n;
	cpumask_var_t mask;
	struct node *node_dev = to_node(dev);

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return 0;

	/* Restrict the node's CPU mask to CPUs that are currently online. */
	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
	n = cpumap_print_to_pagebuf(list, buf, mask);
	free_cpumask_var(mask);

	return n;
}

static inline ssize_t node_read_cpumask(struct device *dev,
+5 −6
Original line number Diff line number Diff line
@@ -694,11 +694,9 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
	tty_set_termios_ldisc(tty, disc);
	retval = tty_ldisc_open(tty, tty->ldisc);
	if (retval) {
		if (!WARN_ON(disc == N_TTY)) {
		tty_ldisc_put(tty->ldisc);
		tty->ldisc = NULL;
	}
	}
	return retval;
}

@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)

	if (tty->ldisc) {
		if (reinit) {
			if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
				tty_ldisc_reinit(tty, N_TTY);
			if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 &&
			    tty_ldisc_reinit(tty, N_TTY) < 0)
				WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0);
		} else
			tty_ldisc_kill(tty);
	}
+1 −1
Original line number Diff line number Diff line
@@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode)
{
	Node *e = inode->i_private;

	if (e->flags & MISC_FMT_OPEN_FILE)
	if (e && e->flags & MISC_FMT_OPEN_FILE)
		filp_close(e->interp_file, NULL);

	clear_inode(inode);
+4 −2
Original line number Diff line number Diff line
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
	if (result)
	if (result) {
		end_page_writeback(page);
	else
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_queue);
	return result;
}
Loading