Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3b7ee175, authored by TARKZiM
Browse files

[DNM] mm: Sync with oneplus msm8994 sources

parent 88f59194
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -612,12 +612,6 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
		}

		lowmem_deathpending_timeout = jiffies + HZ;
		/*
		 * FIXME: lowmemorykiller shouldn't abuse global OOM killer
		 * infrastructure. There is no real reason why the selected
		 * task should have access to the memory reserves.
		 */
		mark_tsk_oom_victim(selected);
		send_sig(SIGKILL, selected, 0);
		rem -= selected_tasksize;
		rcu_read_unlock();
+1 −1
Original line number Diff line number Diff line
@@ -1247,7 +1247,7 @@ cont:
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	reclaimed = reclaim_pages_from_list(&page_list, vma);
	reclaim_pages_from_list(&page_list);
	rp->nr_reclaimed += reclaimed;
	rp->nr_to_reclaim -= reclaimed;
	if (rp->nr_to_reclaim < 0)
+2 −4
Original line number Diff line number Diff line
@@ -75,8 +75,7 @@ struct page *ksm_might_need_to_copy(struct page *page,

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page,
			enum ttu_flags flags, struct vm_area_struct *vma);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -116,8 +115,7 @@ static inline int page_referenced_ksm(struct page *page,
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page,
			enum ttu_flags flags, struct vm_area_struct *target_vma)
/*
 * try_to_unmap_ksm - no-op stub: performs no unmapping and always returns 0.
 * NOTE(review): this looks like the fallback definition for builds without
 * KSM support (the paired extern declaration appears earlier in this header),
 * but the enclosing #ifdef is outside this hunk's view — confirm in ksm.h.
 */
static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}
+9 −0
Original line number Diff line number Diff line
@@ -994,6 +994,10 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
	                 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1632,6 +1636,11 @@ void page_cache_async_readahead(struct address_space *mapping,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+2 −17
Original line number Diff line number Diff line
@@ -47,10 +47,6 @@ static inline bool oom_task_origin(const struct task_struct *p)
	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}

extern void mark_tsk_oom_victim(struct task_struct *tsk);

extern void unmark_oom_victim(void);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
@@ -62,8 +58,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			     struct mem_cgroup *memcg, nodemask_t *nodemask,
			     const char *message);

extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);

extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
			       int order, const nodemask_t *nodemask);
@@ -91,17 +87,6 @@ static inline void oom_killer_enable(void)

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

static inline bool task_will_free_mem(struct task_struct *task)
{
	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	return (task->flags & PF_EXITING) &&
		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
}

extern void dump_tasks(const struct mem_cgroup *memcg,
		const nodemask_t *nodemask);

Loading