Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e3bf460f authored by Nate Diller, committed by Linus Torvalds
Browse files

ntfs: use zero_user_page



Use zero_user_page() instead of open-coding it.

[akpm@linux-foundation.org: kmap-type fixes]
Signed-off-by: Nate Diller <nate.diller@gmail.com>
Acked-by: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6d690dca
Loading
Loading
Loading
Loading
+9 −27
Original line number Diff line number Diff line
@@ -86,19 +86,15 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
		}
		/* Check for the current buffer head overflowing. */
		if (unlikely(file_ofs + bh->b_size > init_size)) {
			u8 *kaddr;
			int ofs;

			ofs = 0;
			if (file_ofs < init_size)
				ofs = init_size - file_ofs;
			local_irq_save(flags);
			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
			memset(kaddr + bh_offset(bh) + ofs, 0,
					bh->b_size - ofs);
			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
			zero_user_page(page, bh_offset(bh) + ofs,
					 bh->b_size - ofs, KM_BIO_SRC_IRQ);
			local_irq_restore(flags);
			flush_dcache_page(page);
		}
	} else {
		clear_buffer_uptodate(bh);
@@ -245,8 +241,7 @@ static int ntfs_read_block(struct page *page)
	rl = NULL;
	nr = i = 0;
	do {
		u8 *kaddr;
		int err;
		int err = 0;

		if (unlikely(buffer_uptodate(bh)))
			continue;
@@ -254,7 +249,6 @@ static int ntfs_read_block(struct page *page)
			arr[nr++] = bh;
			continue;
		}
		err = 0;
		bh->b_bdev = vol->sb->s_bdev;
		/* Is the block within the allowed limits? */
		if (iblock < lblock) {
@@ -340,10 +334,7 @@ static int ntfs_read_block(struct page *page)
		bh->b_blocknr = -1UL;
		clear_buffer_mapped(bh);
handle_zblock:
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + i * blocksize, 0, blocksize);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(page);
		zero_user_page(page, i * blocksize, blocksize, KM_USER0);
		if (likely(!err))
			set_buffer_uptodate(bh);
	} while (i++, iblock++, (bh = bh->b_this_page) != head);
@@ -460,10 +451,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
	 * ok to ignore the compressed flag here.
	 */
	if (unlikely(page->index > 0)) {
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(page);
		kunmap_atomic(kaddr, KM_USER0);
		zero_user_page(page, 0, PAGE_CACHE_SIZE, KM_USER0);
		goto done;
	}
	if (!NInoAttr(ni))
@@ -790,14 +778,10 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
		 * uptodate so it can get discarded by the VM.
		 */
		if (err == -ENOENT || lcn == LCN_ENOENT) {
			u8 *kaddr;

			bh->b_blocknr = -1;
			clear_buffer_dirty(bh);
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + bh_offset(bh), 0, blocksize);
			kunmap_atomic(kaddr, KM_USER0);
			flush_dcache_page(page);
			zero_user_page(page, bh_offset(bh), blocksize,
					KM_USER0);
			set_buffer_uptodate(bh);
			err = 0;
			continue;
@@ -1422,10 +1406,8 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
		if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
			/* The page straddles i_size. */
			unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
			kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
			kunmap_atomic(kaddr, KM_USER0);
			flush_dcache_page(page);
			zero_user_page(page, ofs, PAGE_CACHE_SIZE - ofs,
					KM_USER0);
		}
		/* Handle mst protected attributes. */
		if (NInoMstProtected(ni))
+17 −42
Original line number Diff line number Diff line
@@ -606,11 +606,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
@@ -685,12 +682,9 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						u8 *kaddr = kmap_atomic(page,
								KM_USER0);
						memset(kaddr + bh_offset(bh),
								0, blocksize);
						kunmap_atomic(kaddr, KM_USER0);
						flush_dcache_page(page);
						zero_user_page(page,
							bh_offset(bh),
							blocksize, KM_USER0);
						set_buffer_uptodate(bh);
					}
				}
@@ -708,11 +702,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
@@ -751,10 +742,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				u8 *kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + bh_offset(bh), 0, blocksize);
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
				zero_user_page(page, bh_offset(bh), blocksize,
						KM_USER0);
				set_buffer_uptodate(bh);
			}
			continue;
@@ -878,11 +867,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
				continue;
@@ -1137,16 +1123,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				u8 *kaddr;
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + bh_offset(bh) + ofs, 0,
						blocksize - ofs);
				kunmap_atomic(kaddr, KM_USER0);
				flush_dcache_page(page);
				zero_user_page(page, bh_offset(bh) + ofs,
						blocksize - ofs, KM_USER0);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
@@ -1286,11 +1268,8 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					u8 *kaddr = kmap_atomic(page, KM_USER0);
					memset(kaddr + bh_offset(bh), 0,
							blocksize);
					kunmap_atomic(kaddr, KM_USER0);
					flush_dcache_page(page);
					zero_user_page(page, bh_offset(bh),
							blocksize, KM_USER0);
					set_buffer_uptodate(bh);
				}
			}
@@ -1350,9 +1329,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		memset(kaddr, 0, len);
		kunmap_atomic(kaddr, KM_USER0);
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}
@@ -1473,9 +1450,7 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
		len = PAGE_CACHE_SIZE;
		if (len > bytes)
			len = bytes;
		kaddr = kmap_atomic(*pages, KM_USER0);
		memset(kaddr, 0, len);
		kunmap_atomic(kaddr, KM_USER0);
		zero_user_page(*pages, 0, len, KM_USER0);
	}
	goto out;
}