Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f5278565 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "The gcc-4.4.4 workaround has actually been merged into a KVM tree by
  Paolo but it is stuck in linux-next and mainline needs it"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  arch/x86/kvm/mmu.c: work around gcc-4.4.4 bug
  sched, numa: do not hint for NUMA balancing on VM_MIXEDMAP mappings
  zsmalloc: fix a null pointer dereference in destroy_handle_cache()
  mm: memcontrol: fix false-positive VM_BUG_ON() on -rt
  checkpatch: fix "GLOBAL_INITIALISERS" test
  zram: clear disk io accounting when reset zram device
  memcg: do not call reclaim if !__GFP_WAIT
  mm/memory_hotplug.c: set zone->wait_table to null after freeing it
parents e64f6384 5ec45a19
Loading
Loading
Loading
Loading
+7 −7
Original line number Original line Diff line number Diff line
@@ -4215,13 +4215,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
	u64 entry, gentry, *spte;
	u64 entry, gentry, *spte;
	int npte;
	int npte;
	bool remote_flush, local_flush, zap_page;
	bool remote_flush, local_flush, zap_page;
	union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
	union kvm_mmu_page_role mask = { };
		.cr0_wp = 1,

		.cr4_pae = 1,
	mask.cr0_wp = 1;
		.nxe = 1,
	mask.cr4_pae = 1;
		.smep_andnot_wp = 1,
	mask.nxe = 1;
		.smap_andnot_wp = 1,
	mask.smep_andnot_wp = 1;
	};
	mask.smap_andnot_wp = 1;


	/*
	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * If we don't have indirect shadow pages, it means no page is
+2 −0
Original line number Original line Diff line number Diff line
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
	memset(&zram->stats, 0, sizeof(zram->stats));
	memset(&zram->stats, 0, sizeof(zram->stats));
	zram->disksize = 0;
	zram->disksize = 0;
	zram->max_comp_streams = 1;
	zram->max_comp_streams = 1;

	set_capacity(zram->disk, 0);
	set_capacity(zram->disk, 0);
	part_stat_set_all(&zram->disk->part0, 0);


	up_write(&zram->init_lock);
	up_write(&zram->init_lock);
	/* I/O operation under all of CPU are done so let's free */
	/* I/O operation under all of CPU are done so let's free */
+1 −1
Original line number Original line Diff line number Diff line
@@ -2181,7 +2181,7 @@ void task_numa_work(struct callback_head *work)
	}
	}
	for (; vma; vma = vma->vm_next) {
	for (; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
		if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
			is_vm_hugetlb_page(vma)) {
			is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
			continue;
			continue;
		}
		}


+3 −3
Original line number Original line Diff line number Diff line
@@ -2323,6 +2323,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
	css_get_many(&memcg->css, batch);
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);
		refill_stock(memcg, batch - nr_pages);
	if (!(gfp_mask & __GFP_WAIT))
		goto done;
	/*
	/*
	 * If the hierarchy is above the normal consumption range,
	 * If the hierarchy is above the normal consumption range,
	 * make the charging task trim their excess contribution.
	 * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
	if (!mem_cgroup_is_root(memcg))
	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);
		page_counter_uncharge(&memcg->memory, 1);


	/* XXX: caller holds IRQ-safe mapping->tree_lock */
	/* Caller disabled preemption with mapping->tree_lock */
	VM_BUG_ON(!irqs_disabled());

	mem_cgroup_charge_statistics(memcg, page, -1);
	mem_cgroup_charge_statistics(memcg, page, -1);
	memcg_check_events(memcg, page);
	memcg_check_events(memcg, page);
}
}
+3 −1
Original line number Original line Diff line number Diff line
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
		 * wait_table may be allocated from boot memory,
		 * wait_table may be allocated from boot memory,
		 * here only free if it's allocated by vmalloc.
		 * here only free if it's allocated by vmalloc.
		 */
		 */
		if (is_vmalloc_addr(zone->wait_table))
		if (is_vmalloc_addr(zone->wait_table)) {
			vfree(zone->wait_table);
			vfree(zone->wait_table);
			zone->wait_table = NULL;
		}
	}
	}
}
}
EXPORT_SYMBOL(try_offline_node);
EXPORT_SYMBOL(try_offline_node);
Loading