Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0568b409 authored by Jens Axboe
Browse files

[PATCH] splice: fix bugs in pipe_to_file()



Found by Oleg Nesterov <oleg@tv-sign.ru>, fixed by me.

- Only allow full pages to go to the page cache.
- Check page != buf->page instead of using PIPE_BUF_FLAG_STOLEN.
- Remember to clear 'stolen' if add_to_page_cache() fails.

And as a cleanup on that:

- Make the bottom fall-through logic a little less convoluted. Also make
  the steal path hold an extra reference to the page, so we don't have
  to differentiate between stolen and non-stolen at the end.

Signed-off-by: Jens Axboe <axboe@suse.de>
parent 46e678c9
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -99,8 +99,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
{
	struct page *page = buf->page;

	buf->flags &= ~PIPE_BUF_FLAG_STOLEN;

	/*
	 * If nobody else uses this page, and we don't already have a
	 * temporary page, let's keep track of it as a one-deep
@@ -130,7 +128,6 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
	struct page *page = buf->page;

	if (page_count(page) == 1) {
		buf->flags |= PIPE_BUF_FLAG_STOLEN;
		lock_page(page);
		return 0;
	}
+19 −18
Original line number Diff line number Diff line
@@ -78,7 +78,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
		return 1;
	}

	buf->flags |= PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU;
	buf->flags |= PIPE_BUF_FLAG_LRU;
	return 0;
}

@@ -87,7 +87,7 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
{
	page_cache_release(buf->page);
	buf->page = NULL;
	buf->flags &= ~(PIPE_BUF_FLAG_STOLEN | PIPE_BUF_FLAG_LRU);
	buf->flags &= ~PIPE_BUF_FLAG_LRU;
}

static void *page_cache_pipe_buf_map(struct file *file,
@@ -587,9 +587,10 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
		this_len = PAGE_CACHE_SIZE - offset;

	/*
	 * Reuse buf page, if SPLICE_F_MOVE is set.
	 * Reuse buf page, if SPLICE_F_MOVE is set and we are doing a full
	 * page.
	 */
	if (sd->flags & SPLICE_F_MOVE) {
	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
		/*
		 * If steal succeeds, buf->page is now pruned from the vm
		 * side (LRU and page cache) and we can reuse it. The page
@@ -604,6 +605,8 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
			goto find_page;
		}

		page_cache_get(page);

		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
			lru_cache_add(page);
	} else {
@@ -662,7 +665,7 @@ find_page:
	} else if (ret)
		goto out;

	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN)) {
	if (buf->page != page) {
		char *dst = kmap_atomic(page, KM_USER0);

		memcpy(dst + offset, src + buf->offset, this_len);
@@ -671,22 +674,20 @@ find_page:
	}

	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
	if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	} else if (ret)
		goto out;

	if (!ret) {
		/*
	 * Return the number of bytes written.
		 * Return the number of bytes written and mark page as
		 * accessed, we are now done!
		 */
		ret = this_len;
		mark_page_accessed(page);
		balance_dirty_pages_ratelimited(mapping);
	} else if (ret == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto find_page;
	}
out:
	if (!(buf->flags & PIPE_BUF_FLAG_STOLEN))
	page_cache_release(page);

	unlock_page(page);
out_nomem:
	buf->ops->unmap(info, buf);
+1 −2
Original line number Diff line number Diff line
@@ -5,8 +5,7 @@

#define PIPE_BUFFERS (16)

#define PIPE_BUF_FLAG_STOLEN	0x01
#define PIPE_BUF_FLAG_LRU	0x02
#define PIPE_BUF_FLAG_LRU	0x01

struct pipe_buffer {
	struct page *page;