Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 994fc28c authored by Zach Brown, committed by Joel Becker
Browse files

[PATCH] add AOP_TRUNCATED_PAGE, prepend AOP_ to WRITEPAGE_ACTIVATE



readpage(), prepare_write(), and commit_write() callers are updated to
understand the special return code AOP_TRUNCATED_PAGE in the style of
writepage() and WRITEPAGE_ACTIVATE.  AOP_TRUNCATED_PAGE tells the caller that
the callee has unlocked the page and that the operation should be tried again
with a new page.  OCFS2 uses this to detect and work around a lock inversion in
its aop methods.  There should be no change in behaviour for methods that don't
return AOP_TRUNCATED_PAGE.

WRITEPAGE_ACTIVATE is also prepended with AOP_ for consistency and they are
made enums so that kerneldoc can be used to document their semantics.

Signed-off-by: Zach Brown <zach.brown@oracle.com>
parent 7063fbf2
Loading
Loading
Loading
Loading
+18 −5
Original line number Diff line number Diff line
@@ -213,7 +213,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
	struct address_space_operations *aops = mapping->a_ops;
	pgoff_t index;
	unsigned offset, bv_offs;
	int len, ret = 0;
	int len, ret;

	down(&mapping->host->i_sem);
	index = pos >> PAGE_CACHE_SHIFT;
@@ -232,9 +232,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
		page = grab_cache_page(mapping, index);
		if (unlikely(!page))
			goto fail;
		if (unlikely(aops->prepare_write(file, page, offset,
				offset + size)))
		ret = aops->prepare_write(file, page, offset,
					  offset + size);
		if (unlikely(ret)) {
			if (ret == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				continue;
			}
			goto unlock;
		}
		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
				bvec->bv_page, bv_offs, size, IV);
		if (unlikely(transfer_result)) {
@@ -251,9 +257,15 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
			kunmap_atomic(kaddr, KM_USER0);
		}
		flush_dcache_page(page);
		if (unlikely(aops->commit_write(file, page, offset,
				offset + size)))
		ret = aops->commit_write(file, page, offset,
					 offset + size);
		if (unlikely(ret)) {
			if (ret == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				continue;
			}
			goto unlock;
		}
		if (unlikely(transfer_result))
			goto unlock;
		bv_offs += size;
@@ -264,6 +276,7 @@ static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
		unlock_page(page);
		page_cache_release(page);
	}
	ret = 0;
out:
	up(&mapping->host->i_sem);
	return ret;
+2 −2
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ static int ramdisk_commit_write(struct file *file, struct page *page,

/*
 * ->writepage to the blockdev's mapping has to redirty the page so that the
 * VM doesn't go and steal it.  We return WRITEPAGE_ACTIVATE so that the VM
 * VM doesn't go and steal it.  We return AOP_WRITEPAGE_ACTIVATE so that the VM
 * won't try to (pointlessly) write the page again for a while.
 *
 * Really, these pages should not be on the LRU at all.
@@ -165,7 +165,7 @@ static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
		make_page_uptodate(page);
	SetPageDirty(page);
	if (wbc->for_reclaim)
		return WRITEPAGE_ACTIVATE;
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -721,7 +721,7 @@ mpage_writepages(struct address_space *mapping,
						&last_block_in_bio, &ret, wbc,
						page->mapping->a_ops->writepage);
			}
			if (unlikely(ret == WRITEPAGE_ACTIVATE))
			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
				unlock_page(page);
			if (ret || (--(wbc->nr_to_write) <= 0))
				done = 1;
+31 −0
Original line number Diff line number Diff line
@@ -302,6 +302,37 @@ struct iattr {
 */
#include <linux/quota.h>

/** 
 * enum positive_aop_returns - aop return codes with specific semantics
 *
 * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has
 * 			    completed, that the page is still locked, and
 * 			    should be considered active.  The VM uses this hint
 * 			    to return the page to the active list -- it won't
 * 			    be a candidate for writeback again in the near
 * 			    future.  Other callers must be careful to unlock
 * 			    the page if they get this return.  Returned by
 * 			    writepage(); 
 *
 * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has
 *  			unlocked it and the page might have been truncated.
 *  			The caller should back up to acquiring a new page and
 *  			trying again.  The aop will be taking reasonable
 *  			precautions not to livelock.  If the caller held a page
 *  			reference, it should drop it before retrying.  Returned
 *  			by readpage(), prepare_write(), and commit_write().
 *
 * address_space_operation functions return these large constants to indicate
 * special semantics to the caller.  These are much larger than the bytes in a
 * page to allow for functions that return the number of bytes operated on in a
 * given page.
 */

/* Large positive aop return codes — kept well above PAGE_SIZE so methods
 * that return bytes-operated-on can never collide with them. */
enum positive_aop_returns {
	AOP_WRITEPAGE_ACTIVATE	= 0x80000,	/* writepage(): page left locked, return it to the active list */
	AOP_TRUNCATED_PAGE	= 0x80001,	/* page was unlocked (maybe truncated); caller retries with a new page */
};

/*
 * oh the beauties of C type declarations.
 */
+0 −6
Original line number Diff line number Diff line
@@ -59,12 +59,6 @@ struct writeback_control {
	unsigned for_reclaim:1;			/* Invoked from the page allocator */
};

/*
 * ->writepage() return values (make these much larger than a pagesize, in
 * case some fs is returning number-of-bytes-written from writepage)
 */
#define WRITEPAGE_ACTIVATE	0x80000	/* IO was not started: activate page */

/*
 * fs/fs-writeback.c
 */	
Loading