
Commit 6fe6900e authored by Nick Piggin, committed by Linus Torvalds

mm: make read_cache_page synchronous



Ensure pages are uptodate after returning from read_cache_page, which allows
us to cut out most of the filesystem-internal PageUptodate calls.

I didn't have a great look down the call chains, but this appears to fix 7
possible use-before-uptodate bugs in hfs, 2 in hfsplus, 1 in jfs, a few in
ecryptfs, 1 in jffs2, and a possible case of cleared data being overwritten by
readpage in block2mtd.  All of these depend on whether the filler is async
and/or can return with a !uptodate page.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 714b8171
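
The practical effect on callers is easiest to see side by side. The fragments
below are an illustrative sketch only (the identifiers are whatever the calling
filesystem already has in scope), not code taken from this patch:

	/* Before this patch: read_cache_page() could return a locked, !uptodate
	 * page, so a correct caller had to wait and re-check for itself -- the
	 * step the buggy callers listed above were missing. */
	page = read_cache_page(mapping, index, filler, data);
	if (IS_ERR(page))
		goto error;
	wait_on_page_locked(page);
	if (!PageUptodate(page))
		goto error;

	/* After this patch: read_cache_page()/read_mapping_page() wait internally
	 * and return ERR_PTR(-EIO) rather than a !uptodate page, so the caller
	 * only has to check IS_ERR(). */
	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		goto error;
	/* page is guaranteed uptodate here */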
drivers/mtd/devices/block2mtd.c  +2 −4
@@ -42,11 +42,9 @@ static LIST_HEAD(blkmtd_device_list);
 
 static struct page *page_read(struct address_space *mapping, int index)
 {
-	filler_t *filler = (filler_t*)mapping->a_ops->readpage;
-	return read_cache_page(mapping, index, filler, NULL);
+	return read_mapping_page(mapping, index, NULL);
 }
 
-
 /* erase a specified part of the device */
 static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
 {
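
The two deleted lines fold into read_mapping_page(), which simply supplies
mapping->a_ops->readpage as the filler. Roughly (a sketch of the pagemap.h
helper, not quoted from this patch; the real definition may differ in detail):

	static inline struct page *read_mapping_page(struct address_space *mapping,
						     unsigned long index, void *data)
	{
		filler_t *filler = (filler_t *)mapping->a_ops->readpage;
		return read_cache_page(mapping, index, filler, data);
	}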
fs/afs/dir.c  +0 −3
@@ -194,10 +194,7 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
 
 	page = read_mapping_page(dir->i_mapping, index, &file);
 	if (!IS_ERR(page)) {
-		wait_on_page_locked(page);
 		kmap(page);
-		if (!PageUptodate(page))
-			goto fail;
 		if (!PageChecked(page))
 			afs_dir_check_page(dir, page);
 		if (PageError(page))
fs/afs/mntpt.c  +4 −7
@@ -68,13 +68,11 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 	}
 
 	ret = -EIO;
-	wait_on_page_locked(page);
-	buf = kmap(page);
-	if (!PageUptodate(page))
-		goto out_free;
 	if (PageError(page))
 		goto out_free;
 
+	buf = kmap(page);
+
 	/* examine the symlink's contents */
 	size = vnode->status.size;
 	_debug("symlink to %*.*s", (int) size, (int) size, buf);
@@ -91,8 +89,8 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
 
 	ret = 0;
 
-out_free:
 	kunmap(page);
+out_free:
 	page_cache_release(page);
 out:
 	_leave(" = %d", ret);
@@ -171,8 +169,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 	}
 
 	ret = -EIO;
-	wait_on_page_locked(page);
-	if (!PageUptodate(page) || PageError(page))
+	if (PageError(page))
 		goto error;
 
 	buf = kmap(page);
fs/cramfs/inode.c  +2 −1
@@ -180,7 +180,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
 		struct page *page = NULL;
 
 		if (blocknr + i < devsize) {
-			page = read_mapping_page(mapping, blocknr + i, NULL);
+			page = read_mapping_page_async(mapping, blocknr + i,
+								NULL);
 			/* synchronous error? */
 			if (IS_ERR(page))
 				page = NULL;
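
cramfs is the one caller here that is deliberately left asynchronous: it kicks
off reads for a run of blocks and only waits afterwards, so it is moved to the
new read_mapping_page_async() variant, which keeps the old behaviour. A minimal
sketch of that batch-then-wait pattern (illustrative only, not the surrounding
cramfs code; NR_BLOCKS and pages[] are hypothetical):

	struct page *pages[NR_BLOCKS];
	int i;

	/* Start the reads for every block first so they can proceed in parallel. */
	for (i = 0; i < NR_BLOCKS; i++) {
		struct page *page = read_mapping_page_async(mapping, blocknr + i, NULL);
		pages[i] = IS_ERR(page) ? NULL : page;	/* synchronous error? */
	}

	/* Only now block, once per page, and drop anything that failed. */
	for (i = 0; i < NR_BLOCKS; i++) {
		struct page *page = pages[i];
		if (!page)
			continue;
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {		/* asynchronous read error */
			page_cache_release(page);
			pages[i] = NULL;
		}
	}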
fs/ecryptfs/mmap.c  +1 −10
@@ -46,7 +46,6 @@ struct kmem_cache *ecryptfs_lower_page_cache;
  */
 static struct page *ecryptfs_get1page(struct file *file, int index)
 {
-	struct page *page;
 	struct dentry *dentry;
 	struct inode *inode;
 	struct address_space *mapping;
@@ -54,14 +53,7 @@ static struct page *ecryptfs_get1page(struct file *file, int index)
 	dentry = file->f_path.dentry;
 	inode = dentry->d_inode;
 	mapping = inode->i_mapping;
-	page = read_cache_page(mapping, index,
-			       (filler_t *)mapping->a_ops->readpage,
-			       (void *)file);
-	if (IS_ERR(page))
-		goto out;
-	wait_on_page_locked(page);
-out:
-	return page;
+	return read_mapping_page(mapping, index, (void *)file);
 }
 
 static
@@ -233,7 +225,6 @@ int ecryptfs_do_readpage(struct file *file, struct page *page,
 		ecryptfs_printk(KERN_ERR, "Error reading from page cache\n");
 		goto out;
 	}
-	wait_on_page_locked(lower_page);
 	page_data = kmap_atomic(page, KM_USER0);
 	lower_page_data = kmap_atomic(lower_page, KM_USER1);
 	memcpy(page_data, lower_page_data, PAGE_CACHE_SIZE);
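
For reference, the new synchronous behaviour presumably amounts to the old code
path plus the wait and uptodate check that these callers used to open-code. The
hedged sketch below is an approximation of the mm/filemap.c wrapper (not shown
in this excerpt), not the authoritative code:

	static struct page *read_cache_page_sync_sketch(struct address_space *mapping,
							unsigned long index,
							filler_t *filler, void *data)
	{
		/* Old behaviour lives on under the _async name. */
		struct page *page = read_cache_page_async(mapping, index, filler, data);

		if (IS_ERR(page))
			return page;
		wait_on_page_locked(page);		/* let the filler finish */
		if (!PageUptodate(page)) {		/* asynchronous read failed */
			page_cache_release(page);
			page = ERR_PTR(-EIO);
		}
		return page;
	}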