Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29a8ea4f authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull libnvdimm fixes from Dan Williams:
 "1/ Fixes to the libnvdimm 'pfn' device that establishes a reserved
     area for storing a struct page array.

  2/ Fixes for dax operations on a raw block device to prevent pagecache
     collisions with dax mappings.

  3/ A fix for pfn_t usage in vm_insert_mixed that led to a null
     pointer dereference.

  These have received build success notification from the kbuild robot
  across 153 configs and pass the latest ndctl tests"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  phys_to_pfn_t: use phys_addr_t
  mm: fix pfn_t to page conversion in vm_insert_mixed
  block: use DAX for partition table reads
  block: revert runtime dax control of the raw block device
  fs, block: force direct-I/O for dax-enabled block devices
  devm_memremap_pages: fix vmem_altmap lifetime + alignment handling
  libnvdimm, pfn: fix restoring memmap location
  libnvdimm: fix mode determination for e820 devices
parents 36f90b0a 76e9f0ee
Loading
Loading
Loading
Loading
+0 −38
Original line number Diff line number Diff line
@@ -434,42 +434,6 @@ bool blkdev_dax_capable(struct block_device *bdev)

	return true;
}

static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
{
	unsigned long arg;
	int rc = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(arg, (int __user *)(argp)))
		return -EFAULT;
	arg = !!arg;
	if (arg == !!(bdev->bd_inode->i_flags & S_DAX))
		return 0;

	if (arg)
		arg = S_DAX;

	if (arg && !blkdev_dax_capable(bdev))
		return -ENOTTY;

	inode_lock(bdev->bd_inode);
	if (bdev->bd_map_count == 0)
		inode_set_flags(bdev->bd_inode, arg, S_DAX);
	else
		rc = -EBUSY;
	inode_unlock(bdev->bd_inode);
	return rc;
}
#else
static int blkdev_daxset(struct block_device *bdev, int arg)
{
	if (arg)
		return -ENOTTY;
	return 0;
}
#endif

static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
@@ -634,8 +598,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	case BLKTRACESETUP:
	case BLKTRACETEARDOWN:
		return blk_trace_ioctl(bdev, cmd, argp);
	case BLKDAXSET:
		return blkdev_daxset(bdev, arg);
	case BLKDAXGET:
		return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
		break;
+15 −3
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#include <linux/kmod.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
#include <linux/dax.h>
#include <linux/blktrace_api.h>

#include "partitions/check.h"
@@ -550,13 +551,24 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
	return 0;
}

unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
	return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
			NULL);
}

unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
{
	struct page *page;

	/* don't populate page cache for dax capable devices */
	if (IS_DAX(bdev->bd_inode))
		page = read_dax_sector(bdev, n);
	else
		page = read_pagecache_sector(bdev, n);

	if (!IS_ERR(page)) {
		if (PageError(page))
			goto fail;
+5 −3
Original line number Diff line number Diff line
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,

	device_lock(dev);
	claim = ndns->claim;
	if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
		mode = "memory";
	else if (claim && is_nd_btt(claim))
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
+1 −3
Original line number Diff line number Diff line
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)

	switch (le32_to_cpu(pfn_sb->mode)) {
	case PFN_MODE_RAM:
		break;
	case PFN_MODE_PMEM:
		/* TODO: allocate from PMEM support */
		return -ENOTTY;
		break;
	default:
		return -ENXIO;
	}
+0 −28
Original line number Diff line number Diff line
@@ -1736,37 +1736,13 @@ static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
	return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
}

static void blkdev_vm_open(struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(vma->vm_file);
	struct block_device *bdev = I_BDEV(bd_inode);

	inode_lock(bd_inode);
	bdev->bd_map_count++;
	inode_unlock(bd_inode);
}

static void blkdev_vm_close(struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(vma->vm_file);
	struct block_device *bdev = I_BDEV(bd_inode);

	inode_lock(bd_inode);
	bdev->bd_map_count--;
	inode_unlock(bd_inode);
}

static const struct vm_operations_struct blkdev_dax_vm_ops = {
	.open		= blkdev_vm_open,
	.close		= blkdev_vm_close,
	.fault		= blkdev_dax_fault,
	.pmd_fault	= blkdev_dax_pmd_fault,
	.pfn_mkwrite	= blkdev_dax_fault,
};

static const struct vm_operations_struct blkdev_default_vm_ops = {
	.open		= blkdev_vm_open,
	.close		= blkdev_vm_close,
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
};
@@ -1774,18 +1750,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *bd_inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(bd_inode);

	file_accessed(file);
	inode_lock(bd_inode);
	bdev->bd_map_count++;
	if (IS_DAX(bd_inode)) {
		vma->vm_ops = &blkdev_dax_vm_ops;
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	} else {
		vma->vm_ops = &blkdev_default_vm_ops;
	}
	inode_unlock(bd_inode);

	return 0;
}
Loading