Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f2fff596 authored by Nate Diller, committed by Linus Torvalds
Browse files

reiserfs: use zero_user_page



Use zero_user_page() instead of open-coding it.

Signed-off-by: Nate Diller <nate.diller@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0c11d7a9
Loading
Loading
Loading
Loading
+11 −28
Original line number Diff line number Diff line
@@ -1059,20 +1059,12 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
	   maping blocks, since there is none, so we just zero out remaining
	   parts of first and last pages in write area (if needed) */
	if ((pos & ~((loff_t) PAGE_CACHE_SIZE - 1)) > inode->i_size) {
		if (from != 0) {	/* First page needs to be partially zeroed */
			char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
			memset(kaddr, 0, from);
			kunmap_atomic(kaddr, KM_USER0);
			flush_dcache_page(prepared_pages[0]);
		}
		if (to != PAGE_CACHE_SIZE) {	/* Last page needs to be partially zeroed */
			char *kaddr =
			    kmap_atomic(prepared_pages[num_pages - 1],
					KM_USER0);
			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
			kunmap_atomic(kaddr, KM_USER0);
			flush_dcache_page(prepared_pages[num_pages - 1]);
		}
		if (from != 0)		/* First page needs to be partially zeroed */
			zero_user_page(prepared_pages[0], 0, from, KM_USER0);

		if (to != PAGE_CACHE_SIZE)	/* Last page needs to be partially zeroed */
			zero_user_page(prepared_pages[num_pages-1], to,
					PAGE_CACHE_SIZE - to, KM_USER0);

		/* Since all blocks are new - use already calculated value */
		return blocks;
@@ -1199,13 +1191,9 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
					ll_rw_block(READ, 1, &bh);
					*wait_bh++ = bh;
				} else {	/* Not mapped, zero it */
					char *kaddr =
					    kmap_atomic(prepared_pages[0],
							KM_USER0);
					memset(kaddr + block_start, 0,
					       from - block_start);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(prepared_pages[0]);
					zero_user_page(prepared_pages[0],
						       block_start,
						       from - block_start, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
@@ -1237,13 +1225,8 @@ static int reiserfs_prepare_file_region_for_write(struct inode *inode
					ll_rw_block(READ, 1, &bh);
					*wait_bh++ = bh;
				} else {	/* Not mapped, zero it */
					char *kaddr =
					    kmap_atomic(prepared_pages
							[num_pages - 1],
							KM_USER0);
					memset(kaddr + to, 0, block_end - to);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(prepared_pages[num_pages - 1]);
					zero_user_page(prepared_pages[num_pages-1],
							to, block_end - to, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
+2 −11
Original line number Diff line number Diff line
@@ -2148,13 +2148,8 @@ int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
		length = offset & (blocksize - 1);
		/* if we are not on a block boundary */
		if (length) {
			char *kaddr;

			length = blocksize - length;
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + offset, 0, length);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
			zero_user_page(page, offset, length, KM_USER0);
			if (buffer_mapped(bh) && bh->b_blocknr != 0) {
				mark_buffer_dirty(bh);
			}
@@ -2370,7 +2365,6 @@ static int reiserfs_write_full_page(struct page *page,
	 ** last byte in the file
	 */
	if (page->index >= end_index) {
		char *kaddr;
		unsigned last_offset;

		last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
@@ -2379,10 +2373,7 @@ static int reiserfs_write_full_page(struct page *page,
			unlock_page(page);
			return 0;
		}
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + last_offset, 0, PAGE_CACHE_SIZE - last_offset);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		zero_user_page(page, last_offset, PAGE_CACHE_SIZE - last_offset, KM_USER0);
	}
	bh = head;
	block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits);