
Commit d30a1100 authored by Wu Fengguang, committed by Linus Torvalds

readahead: record mmap read-around states in file_ra_state



Mmap read-around now shares the same code style and data structure with the
readahead code.

This also removes do_page_cache_readahead().  Its last user, mmap
read-around, has been changed to call ra_submit().

The no-readahead-if-congested logic is dropped along the way.  Users are
pretty sensitive to the slow loading of executables, so it's unfavorable to
disable mmap read-around on a congested queue.
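
For reference, the read-around "state" recorded by this patch is the same
window description the ordinary readahead path already keeps in struct
file_ra_state.  A partial sketch of the members involved (field types assumed
from the fs.h of this era, not quoted from this patch):

	/*
	 * Sketch of the file_ra_state fields that mmap read-around now
	 * fills in; not the complete kernel definition (see
	 * include/linux/fs.h for the real struct).
	 */
	struct file_ra_state {
		pgoff_t start;			/* first page of the current window */
		unsigned int size;		/* number of pages in the window */
		unsigned int async_size;	/* pages remaining when async
						   readahead should kick in
						   (0 for read-around) */
		unsigned int ra_pages;		/* maximum readahead window */
		/* ... other bookkeeping fields omitted ... */
	};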

[akpm@linux-foundation.org: coding-style fixes]
Cc: Nick Piggin <npiggin@suse.de>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2fad6f5d
include/linux/mm.h  +3 −2
@@ -1178,8 +1178,6 @@ void task_dirty_inc(struct task_struct *tsk);
 #define VM_MAX_READAHEAD	128	/* kbytes */
 #define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
 
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 			pgoff_t offset, unsigned long nr_to_read);
 
@@ -1197,6 +1195,9 @@ void page_cache_async_readahead(struct address_space *mapping,
 				unsigned long size);
 
 unsigned long max_sane_readahead(unsigned long nr);
+unsigned long ra_submit(struct file_ra_state *ra,
+			struct address_space *mapping,
+			struct file *filp);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
mm/filemap.c  +7 −5
@@ -1488,13 +1488,15 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
 	if (ra->mmap_miss > MMAP_LOTSAMISS)
 		return;
 
+	/*
+	 * mmap read-around
+	 */
 	ra_pages = max_sane_readahead(ra->ra_pages);
 	if (ra_pages) {
-		pgoff_t start = 0;
-
-		if (offset > ra_pages / 2)
-			start = offset - ra_pages / 2;
-		do_page_cache_readahead(mapping, file, start, ra_pages);
+		ra->start = max_t(long, 0, offset - ra_pages/2);
+		ra->size = ra_pages;
+		ra->async_size = 0;
+		ra_submit(ra, mapping, file);
 	}
 }
 
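One subtle point in this hunk: the old code guarded against unsigned underflow
explicitly (start stays 0 unless offset > ra_pages / 2), while the new code
relies on max_t(long, ...) comparing the difference as a signed value.  A
minimal userspace sketch of why the two agree for typical values (pgoff_t and
max_t are simplified stand-ins defined here, not the kernel headers):

	#include <stdio.h>

	/* Userspace stand-ins for the kernel types/macro used above. */
	typedef unsigned long pgoff_t;
	#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

	int main(void)
	{
		pgoff_t offset = 3;		/* faulting page index */
		unsigned long ra_pages = 32;	/* read-around window, in pages */

		/* Old logic: explicit guard against unsigned underflow. */
		pgoff_t old_start = 0;
		if (offset > ra_pages / 2)
			old_start = offset - ra_pages / 2;

		/* New logic: the subtraction may wrap as unsigned, but
		 * max_t(long, ...) compares it as signed and clamps at 0. */
		pgoff_t new_start = max_t(long, 0, offset - ra_pages / 2);

		printf("old=%lu new=%lu\n", old_start, new_start);	/* both 0 */
		return 0;
	}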
mm/readahead.c  +2 −21
@@ -133,15 +133,12 @@ out:
 }
 
 /*
- * do_page_cache_readahead actually reads a chunk of disk.  It allocates all
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
  * behaviour which would occur if page allocations are causing VM writeback.
  * We really don't want to intermingle reads and writes like that.
  *
  * Returns the number of pages requested, or the maximum amount of I/O allowed.
- *
- * do_page_cache_readahead() returns -1 if it encountered request queue
- * congestion.
  */
 static int
 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
@@ -231,22 +228,6 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
 	return ret;
 }
 
-/*
- * This version skips the IO if the queue is read-congested, and will tell the
- * block layer to abandon the readahead if request allocation would block.
- *
- * force_page_cache_readahead() will ignore queue congestion and will block on
- * request queues.
- */
-int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read)
-{
-	if (bdi_read_congested(mapping->backing_dev_info))
-		return -1;
-
-	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
-}
-
 /*
  * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
  * sensible upper limit.
@@ -260,7 +241,7 @@ unsigned long max_sane_readahead(unsigned long nr)
 /*
  * Submit IO for the read-ahead request in file_ra_state.
  */
-static unsigned long ra_submit(struct file_ra_state *ra,
+unsigned long ra_submit(struct file_ra_state *ra,
 		       struct address_space *mapping, struct file *filp)
 {
 	int actual;
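
The last hunk is cut off here.  For context, after this patch ra_submit()
simply forwards the window recorded in file_ra_state to
__do_page_cache_readahead(); a rough sketch of the whole function
(reconstructed, not part of the diff shown above):

	/* Reconstructed sketch of ra_submit() after this patch: submit the
	 * read-ahead window already recorded in *ra. */
	unsigned long ra_submit(struct file_ra_state *ra,
			       struct address_space *mapping, struct file *filp)
	{
		int actual;

		actual = __do_page_cache_readahead(mapping, filp,
						ra->start, ra->size, ra->async_size);

		return actual;
	}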