
Commit c8ad6302 authored by Geliang Tang, committed by Linus Torvalds

mm/readahead.c, mm/vmscan.c: use lru_to_page instead of list_to_page



list_to_page() in readahead.c is the same as lru_to_page() in vmscan.c, so move lru_to_page() into mm/internal.h and drop list_to_page().

Signed-off-by: Geliang Tang <geliangtang@163.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 75469345
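
For context, here is a minimal user-space sketch of what the shared lru_to_page() macro does; it is not kernel code and not part of the patch. It uses the same list_entry()/container_of() pattern to map the tail node of a list (head->prev) back to the struct page that embeds it. The struct page and the list_add() helper below are simplified stand-ins for the kernel definitions.

/*
 * Minimal user-space sketch of the lru_to_page() idiom (NOT kernel code):
 * a toy struct page linked through ->lru, plus just enough list machinery
 * to show that lru_to_page(head) returns the page embedding head->prev,
 * i.e. the page at the tail of the list.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Same idea as the kernel's list_entry()/container_of(). */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/* Hypothetical stand-in for the kernel's struct page. */
struct page {
	unsigned long index;
	struct list_head lru;
};

/* Insert 'entry' right after 'head', like the kernel's list_add(). */
static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

int main(void)
{
	struct list_head pages = { &pages, &pages };
	struct page a = { .index = 1 }, b = { .index = 2 };

	/* Adding at the head leaves the first page (index 1) at the tail. */
	list_add(&a.lru, &pages);
	list_add(&b.lru, &pages);

	/* Prints 1: lru_to_page() picks the page at the tail of the list. */
	printf("tail page index = %lu\n", lru_to_page(&pages)->index);
	return 0;
}

The kernel macro is identical in form; the patch simply gives readahead.c and vmscan.c one shared definition in mm/internal.h instead of two private copies.
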
mm/internal.h +2 −0
@@ -119,6 +119,8 @@ extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
 extern bool zone_reclaimable(struct zone *zone);
 
+#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
+
 /*
  * in mm/rmap.c:
  */
mm/readahead.c +3 −5
@@ -32,8 +32,6 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
 
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
 /*
  * see if a page needs releasing upon read_cache_pages() failure
  * - the caller of read_cache_pages() may have set PG_private or PG_fscache
@@ -64,7 +62,7 @@ static void read_cache_pages_invalidate_pages(struct address_space *mapping,
 	struct page *victim;
 
 	while (!list_empty(pages)) {
-		victim = list_to_page(pages);
+		victim = lru_to_page(pages);
 		list_del(&victim->lru);
 		read_cache_pages_invalidate_page(mapping, victim);
 	}
@@ -87,7 +85,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	int ret = 0;
 
 	while (!list_empty(pages)) {
-		page = list_to_page(pages);
+		page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping, page->index,
 				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
@@ -125,7 +123,7 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	}
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = list_to_page(pages);
+		struct page *page = lru_to_page(pages);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping, page->index,
 				mapping_gfp_constraint(mapping, GFP_KERNEL))) {
mm/vmscan.c +0 −2
@@ -106,8 +106,6 @@ struct scan_control {
 	unsigned long nr_reclaimed;
 };
 
-#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
-
 #ifdef ARCH_HAS_PREFETCH
 #define prefetch_prev_lru_page(_page, _base, _field)			\
 	do {								\