
Commit 750b4987 authored by Nick Piggin, committed by Andi Kleen

HWPOISON: Refactor truncate to allow direct truncating of page v2



Extract truncate_inode_page() out of the truncate path so that
it can be used by memory-failure.c

[AK: description, headers, fix typos]
v2: Some white space changes from Fengguang Wu

Signed-off-by: Andi Kleen <ak@linux.intel.com>
parent 2a7684a2
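
The refactor means a single pagecache page can now be dropped from outside the regular truncate loop, which is what the hwpoison code needs. The sketch below is illustrative only and is not part of this commit or of memory-failure.c: the helper name drop_poisoned_page() is hypothetical, and it simply shows how a caller might use the new truncate_inode_page() under the page lock, mirroring the locking the truncate path already takes.

/* Hypothetical example caller, for illustration only (not from this commit). */
static int drop_poisoned_page(struct page *page)
{
	struct address_space *mapping;
	int ret;

	lock_page(page);
	mapping = page_mapping(page);
	if (!mapping) {
		/* Anonymous or already-removed page: nothing to truncate. */
		unlock_page(page);
		return -EINVAL;
	}
	/*
	 * truncate_inode_page() unmaps the page from user page tables
	 * (if it is mapped) and removes it from the page cache; after
	 * this change it returns -EIO when the page no longer belongs
	 * to the mapping, and 0 on success.
	 */
	ret = truncate_inode_page(mapping, page);
	unlock_page(page);
	return ret;
}
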
include/linux/mm.h +2 −0
@@ -794,6 +794,8 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 extern int vmtruncate(struct inode * inode, loff_t offset);
 extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 
+int truncate_inode_page(struct address_space *mapping, struct page *page);
+
 #ifdef CONFIG_MMU
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
mm/truncate.c +15 −14
@@ -93,11 +93,11 @@ EXPORT_SYMBOL(cancel_dirty_page);
  * its lock, b) when a concurrent invalidate_mapping_pages got there first and
  * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
  */
-static void
+static int
 truncate_complete_page(struct address_space *mapping, struct page *page)
 {
 	if (page->mapping != mapping)
-		return;
+		return -EIO;
 
 	if (page_has_private(page))
 		do_invalidatepage(page, 0);
@@ -108,6 +108,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	remove_from_page_cache(page);
 	ClearPageMappedToDisk(page);
 	page_cache_release(page);	/* pagecache ref */
+	return 0;
 }
 
 /*
@@ -135,6 +136,16 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	return ret;
 }
 
+int truncate_inode_page(struct address_space *mapping, struct page *page)
+{
+	if (page_mapped(page)) {
+		unmap_mapping_range(mapping,
+				   (loff_t)page->index << PAGE_CACHE_SHIFT,
+				   PAGE_CACHE_SIZE, 0);
+	}
+	return truncate_complete_page(mapping, page);
+}
+
 /**
  * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
  * @mapping: mapping to truncate
@@ -196,12 +207,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 				unlock_page(page);
 				continue;
 			}
-			if (page_mapped(page)) {
-				unmap_mapping_range(mapping,
-				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
-				  PAGE_CACHE_SIZE, 0);
-			}
-			truncate_complete_page(mapping, page);
+			truncate_inode_page(mapping, page);
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);
@@ -238,15 +244,10 @@ void truncate_inode_pages_range(struct address_space *mapping,
 				break;
 			lock_page(page);
 			wait_on_page_writeback(page);
-			if (page_mapped(page)) {
-				unmap_mapping_range(mapping,
-				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
-				  PAGE_CACHE_SIZE, 0);
-			}
+			truncate_inode_page(mapping, page);
 			if (page->index > next)
 				next = page->index;
 			next++;
-			truncate_complete_page(mapping, page);
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);