Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit deb11639 authored by Vinayak Menon, committed by Shiraz Hashim
Browse files

Revert "mm: prioritize tasks holding CMA pages"



This reverts 'commit e1574295
("mm: prioritize tasks holding CMA pages")'

A race was found in the add/delete operations on the hash list maintained
for exiting tasks. Reverting for now, since this feature itself
is not of much use at present.

Change-Id: Id86839d6d4af35ff9647bc38b65e4aea61f3909c
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
parent fbfdd7c9
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -147,11 +147,6 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
	tlb_flush_mmu_free(tlb);
}

/*
 * Release the pages batched in the gather ahead of tlb_finish_mmu() by
 * forcing a full flush now.  (Removed by this revert; callers presumably
 * used it to free CMA pages early — confirm against the reverted commit.)
 */
static inline void tlb_free_pages_early(struct mmu_gather *tlb)
{
	tlb_flush_mmu(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
+0 −5
Original line number Diff line number Diff line
@@ -233,11 +233,6 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

/*
 * Early-free the pages accumulated in this gather by flushing the TLB
 * batch immediately, rather than waiting for tlb_finish_mmu().
 * (Removed by this revert.)
 */
static inline void tlb_free_pages_early(struct mmu_gather *tlb)
{
	tlb_flush_mmu(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
+0 −4
Original line number Diff line number Diff line
@@ -82,10 +82,6 @@ static inline void tlb_finish_mmu(struct mmu_gather *tlb,
	tlb_flush_mmu(tlb);
}

/*
 * No-op on this architecture: there is no batched page list to release
 * early, so nothing to do here.  (Removed by this revert.)
 */
static inline void tlb_free_pages_early(struct mmu_gather *tlb)
{
}

/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the tlb for a page cache page
+0 −4
Original line number Diff line number Diff line
@@ -93,10 +93,6 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
	check_pgt_cache();
}

/*
 * No-op stub: this architecture keeps no early-freeable page batch in
 * the gather, so the regular tlb_finish_mmu() path suffices.
 * (Removed by this revert.)
 */
static inline void tlb_free_pages_early(struct mmu_gather *tlb)
{
}

/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
+0 −1
Original line number Diff line number Diff line
@@ -115,7 +115,6 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
							unsigned long end);
void tlb_free_pages_early(struct mmu_gather *tlb);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
Loading