Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ce944935 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "9 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
  mm/page_alloc.c: fix never set ALLOC_NOFRAGMENT flag
  mm/page_alloc.c: avoid potential NULL pointer dereference
  mm, page_alloc: always use a captured page regardless of compaction result
  mm: do not boost watermarks to avoid fragmentation for the DISCONTIG memory model
  lib/test_vmalloc.c: do not create cpumask_t variable on stack
  lib/Kconfig.debug: fix build error without CONFIG_BLOCK
  zram: pass down the bvec we need to read into in the work struct
  mm/memory_hotplug.c: drop memory device reference after find_memory_block()
parents 857e17c2 89189557
Loading
Loading
Loading
Loading
+8 −8
Original line number Original line Diff line number Diff line
@@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
increase the success rate of future high-order allocations such as SLUB
increase the success rate of future high-order allocations such as SLUB
allocations, THP and hugetlbfs pages.
allocations, THP and hugetlbfs pages.


To make it sensible with respect to the watermark_scale_factor parameter,
To make it sensible with respect to the watermark_scale_factor
the unit is in fractions of 10,000. The default value of 15,000 means
parameter, the unit is in fractions of 10,000. The default value of
that up to 150% of the high watermark will be reclaimed in the event of
15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
a pageblock being mixed due to fragmentation. The level of reclaim is
watermark will be reclaimed in the event of a pageblock being mixed due
determined by the number of fragmentation events that occurred in the
to fragmentation. The level of reclaim is determined by the number of
recent past. If this value is smaller than a pageblock then a pageblocks
fragmentation events that occurred in the recent past. If this value is
worth of pages will be reclaimed (e.g.  2MB on 64-bit x86). A boost factor
smaller than a pageblock then a pageblocks worth of pages will be reclaimed
of 0 will disable the feature.
(e.g.  2MB on 64-bit x86). A boost factor of 0 will disable the feature.


=============================================================
=============================================================


+3 −2
Original line number Original line Diff line number Diff line
@@ -774,18 +774,18 @@ struct zram_work {
	struct zram *zram;
	struct zram *zram;
	unsigned long entry;
	unsigned long entry;
	struct bio *bio;
	struct bio *bio;
	struct bio_vec bvec;
};
};


#if PAGE_SIZE != 4096
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
static void zram_sync_read(struct work_struct *work)
{
{
	struct bio_vec bvec;
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram_work *zw = container_of(work, struct zram_work, work);
	struct zram *zram = zw->zram;
	struct zram *zram = zw->zram;
	unsigned long entry = zw->entry;
	unsigned long entry = zw->entry;
	struct bio *bio = zw->bio;
	struct bio *bio = zw->bio;


	read_from_bdev_async(zram, &bvec, entry, bio);
	read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
}


/*
/*
@@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
{
	struct zram_work work;
	struct zram_work work;


	work.bvec = *bvec;
	work.zram = zram;
	work.zram = zram;
	work.entry = entry;
	work.entry = entry;
	work.bio = bio;
	work.bio = bio;
+4 −2
Original line number Original line Diff line number Diff line
@@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
	if (--header->nreg)
	if (--header->nreg)
		return;
		return;


	if (parent)
	if (parent) {
		put_links(header);
		put_links(header);
		start_unregistering(header);
		start_unregistering(header);
	}

	if (!--header->count)
	if (!--header->count)
		kfree_rcu(header, rcu);
		kfree_rcu(header, rcu);


+1 −0
Original line number Original line Diff line number Diff line
@@ -1929,6 +1929,7 @@ config TEST_KMOD
	depends on m
	depends on m
	depends on BLOCK && (64BIT || LBDAF)	  # for XFS, BTRFS
	depends on BLOCK && (64BIT || LBDAF)	  # for XFS, BTRFS
	depends on NETDEVICES && NET_CORE && INET # for TUN
	depends on NETDEVICES && NET_CORE && INET # for TUN
	depends on BLOCK
	select TEST_LKM
	select TEST_LKM
	select XFS_FS
	select XFS_FS
	select TUN
	select TUN
+3 −3
Original line number Original line Diff line number Diff line
@@ -383,14 +383,14 @@ static void shuffle_array(int *arr, int n)
static int test_func(void *private)
static int test_func(void *private)
{
{
	struct test_driver *t = private;
	struct test_driver *t = private;
	cpumask_t newmask = CPU_MASK_NONE;
	int random_array[ARRAY_SIZE(test_case_array)];
	int random_array[ARRAY_SIZE(test_case_array)];
	int index, i, j, ret;
	int index, i, j, ret;
	ktime_t kt;
	ktime_t kt;
	u64 delta;
	u64 delta;


	cpumask_set_cpu(t->cpu, &newmask);
	ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu));
	set_cpus_allowed_ptr(current, &newmask);
	if (ret < 0)
		pr_err("Failed to set affinity to %d CPU\n", t->cpu);


	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
	for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
		random_array[i] = i;
		random_array[i] = i;
Loading