Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3d875182 authored by Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  sh: add copy_user_page() alias for __copy_user()
  lib/Kconfig: ZLIB_DEFLATE must select BITREVERSE
  mm, dax: fix DAX deadlocks
  memcg: convert threshold to bytes
  builddeb: remove debian/files before build
  mm, fs: obey gfp_mapping for add_to_page_cache()
parents 69984b64 934ed25e
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -59,6 +59,7 @@ pages_do_alias(unsigned long addr1, unsigned long addr2)

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
#define copy_user_page(to, from, vaddr, pg)  __copy_user(to, from, PAGE_SIZE)

struct page;
struct vm_area_struct;
+1 −1
Original line number Diff line number Diff line
@@ -224,7 +224,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)

		prefetchw(&page->flags);
		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
					    GFP_KERNEL);
					    GFP_NOFS);
		if (ret == 0) {
			unlock_page(page);
		} else {
+3 −3
Original line number Diff line number Diff line
@@ -3380,6 +3380,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
	struct page *page, *tpage;
	unsigned int expected_index;
	int rc;
	gfp_t gfp = GFP_KERNEL & mapping_gfp_mask(mapping);

	INIT_LIST_HEAD(tmplist);

@@ -3392,7 +3393,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
	 */
	__set_page_locked(page);
	rc = add_to_page_cache_locked(page, mapping,
				      page->index, GFP_KERNEL);
				      page->index, gfp);

	/* give up if we can't stick it in the cache */
	if (rc) {
@@ -3418,8 +3419,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
			break;

		__set_page_locked(page);
		if (add_to_page_cache_locked(page, mapping, page->index,
								GFP_KERNEL)) {
		if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
			__clear_page_locked(page);
			break;
		}
+29 −41
Original line number Diff line number Diff line
@@ -285,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void __pmem *addr;
@@ -292,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
@@ -321,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

@@ -382,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock;
			goto unlock_page;
		}
	} else {
		i_mmap_lock_write(mapping);
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock;
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -403,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock;
				goto unlock_page;
		} else {
			i_mmap_unlock_write(mapping);
			return dax_load_hole(mapping, page, vmf);
		}
	}
@@ -417,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock;
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto unlock;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
@@ -461,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

	if (!page)
		i_mmap_unlock_write(mapping);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
@@ -471,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock:
 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	} else {
		i_mmap_unlock_write(mapping);
	}

	goto out;
}
EXPORT_SYMBOL(__dax_fault);
@@ -556,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	i_mmap_lock_write(mapping);
	length = get_block(inode, block, &bh, write);
	if (length)
		return VM_FAULT_SIGBUS;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
@@ -569,36 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	sector = bh.b_blocknr << (blkbits - 9);

	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
		int i;

		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		for (i = 0; i < PTRS_PER_PMD; i++)
			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
		wmb_pmem();
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
		result |= VM_FAULT_MAJOR;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_write(mapping);
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_write(mapping);
		i_mmap_lock_read(mapping);
	}

	/*
@@ -635,6 +612,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		sector = bh.b_blocknr << (blkbits - 9);
		length = bdev_direct_access(bh.b_bdev, sector, &kaddr, &pfn,
						bh.b_size);
		if (length < 0) {
@@ -644,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
			goto fallback;

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			int i;
			for (i = 0; i < PTRS_PER_PMD; i++)
				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}

		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	i_mmap_unlock_write(mapping);

	return result;

 fallback:
+2 −2
Original line number Diff line number Diff line
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
			if (add_to_page_cache_lru(page, mapping, page->index,
					GFP_KERNEL & mapping_gfp_mask(mapping)))
				goto next_page;
		}

Loading