Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e0267c2 authored by Jens Axboe
Browse files

[PATCH] splice: fixup writeout path after ->map changes



Since ->map() no longer locks the page, we need to adjust the handling
of those pages (and stealing) a little. This now passes full regressions
again.

Signed-off-by: Jens Axboe <axboe@suse.de>
parent a4514ebd
Loading
Loading
Loading
Loading
+30 −19
Original line number Diff line number Diff line
@@ -50,7 +50,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
	struct page *page = buf->page;
	struct address_space *mapping = page_mapping(page);

	WARN_ON(!PageLocked(page));
	lock_page(page);

	WARN_ON(!PageUptodate(page));

	/*
@@ -65,8 +66,10 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
	if (PagePrivate(page))
		try_to_release_page(page, mapping_gfp_mask(mapping));

	if (!remove_mapping(mapping, page))
	if (!remove_mapping(mapping, page)) {
		unlock_page(page);
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	return 0;
@@ -507,14 +510,12 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
	if (sd->flags & SPLICE_F_MOVE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it.
		 * side (LRU and page cache) and we can reuse it. The page
		 * will also be locked on successful return.
		 */
		if (buf->ops->steal(info, buf))
			goto find_page;

		/*
		 * this will also set the page locked
		 */
		page = buf->page;
		if (add_to_page_cache(page, mapping, index, gfp_mask))
			goto find_page;
@@ -523,15 +524,27 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			lru_cache_add(page);
	} else {
find_page:
		page = find_lock_page(mapping, index);
		if (!page) {
			ret = -ENOMEM;
		page = find_or_create_page(mapping, index, gfp_mask);
		if (!page)
			page = page_cache_alloc_cold(mapping);
			if (unlikely(!page))
				goto out_nomem;

			/*
		 * If the page is uptodate, it is also locked. If it isn't
		 * uptodate, we can mark it uptodate if we are filling the
		 * full page. Otherwise we need to read it in first...
			 * This will also lock the page
			 */
			ret = add_to_page_cache_lru(page, mapping, index,
						    gfp_mask);
			if (unlikely(ret))
				goto out;
		}

		/*
		 * We get here with the page locked. If the page is also
		 * uptodate, we don't need to do more. If it isn't, we
		 * may need to bring it in if we are not going to overwrite
		 * the full page.
		 */
		if (!PageUptodate(page)) {
			if (sd->len < PAGE_CACHE_SIZE) {
@@ -553,12 +566,10 @@ find_page:
					ret = -EIO;
					goto out;
				}
			} else {
				WARN_ON(!PageLocked(page));
			} else
				SetPageUptodate(page);
		}
	}
	}

	ret = mapping->a_ops->prepare_write(file, page, 0, sd->len);
	if (ret == AOP_TRUNCATED_PAGE) {
@@ -585,10 +596,10 @@ find_page:
	mark_page_accessed(page);
	balance_dirty_pages_ratelimited(mapping);
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
		page_cache_release(page);

	unlock_page(page);
	}
out_nomem:
	buf->ops->unmap(info, buf);
	return ret;