Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5e508e1d authored by TARKZiM
Browse files

Revert "[DNM] mm: Sync with oneplus msm8994 sources"

This reverts commit 3b7ee175.
parent e791269d
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -612,6 +612,12 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
		}

		lowmem_deathpending_timeout = jiffies + HZ;
		/*
		 * FIXME: lowmemorykiller shouldn't abuse global OOM killer
		 * infrastructure. There is no real reason why the selected
		 * task should have access to the memory reserves.
		 */
		mark_tsk_oom_victim(selected);
		send_sig(SIGKILL, selected, 0);
		rem -= selected_tasksize;
		rcu_read_unlock();
+1 −1
Original line number Diff line number Diff line
@@ -1247,7 +1247,7 @@ cont:
			break;
	}
	pte_unmap_unlock(pte - 1, ptl);
	reclaim_pages_from_list(&page_list);
	reclaimed = reclaim_pages_from_list(&page_list, vma);
	rp->nr_reclaimed += reclaimed;
	rp->nr_to_reclaim -= reclaimed;
	if (rp->nr_to_reclaim < 0)
+4 −2
Original line number Diff line number Diff line
@@ -75,7 +75,8 @@ struct page *ksm_might_need_to_copy(struct page *page,

int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int try_to_unmap_ksm(struct page *page,
			enum ttu_flags flags, struct vm_area_struct *vma);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -115,7 +116,8 @@ static inline int page_referenced_ksm(struct page *page,
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
/*
 * No-op stub of try_to_unmap_ksm(): always reports success (0) without
 * touching the page. NOTE(review): this appears to be the fallback used when
 * KSM support is compiled out (the surrounding #ifdef context is not visible
 * in this hunk — confirm against include/linux/ksm.h); target_vma is accepted
 * only to match the KSM-enabled signature and is ignored.
 */
static inline int try_to_unmap_ksm(struct page *page,
			enum ttu_flags flags, struct vm_area_struct *target_vma)
{
	return 0;
}
+0 −9
Original line number Diff line number Diff line
@@ -994,10 +994,6 @@ void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 * (see the comment on walk_page_range() for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
	                 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
@@ -1636,11 +1632,6 @@ void page_cache_async_readahead(struct address_space *mapping,
				pgoff_t offset,
				unsigned long size);

unsigned long max_sane_readahead(unsigned long nr);
unsigned long ra_submit(struct file_ra_state *ra,
			struct address_space *mapping,
			struct file *filp);

extern unsigned long stack_guard_gap;
/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+17 −2
Original line number Diff line number Diff line
@@ -47,6 +47,10 @@ static inline bool oom_task_origin(const struct task_struct *p)
	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}

extern void mark_tsk_oom_victim(struct task_struct *tsk);

extern void unmark_oom_victim(void);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);
@@ -58,8 +62,8 @@ extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
			     struct mem_cgroup *memcg, nodemask_t *nodemask,
			     const char *message);

extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);

extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
			       int order, const nodemask_t *nodemask);
@@ -87,6 +91,17 @@ static inline void oom_killer_enable(void)

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/*
 * Report whether @task can be expected to release its memory on its own
 * soon. An exiting task normally will — but a task whose thread group is
 * dumping core may sleep in exit_mm() for a long time, so the OOM killer
 * must not rely on it exiting promptly.
 */
static inline bool task_will_free_mem(struct task_struct *task)
{
	bool exiting = task->flags & PF_EXITING;
	bool coredumping = task->signal->flags & SIGNAL_GROUP_COREDUMP;

	return exiting && !coredumping;
}

extern void dump_tasks(const struct mem_cgroup *memcg,
		const nodemask_t *nodemask);

Loading