Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a70bd43 authored by Dan Williams, committed by Vishal Verma
Browse files

dax: enable dax in the presence of known media errors (badblocks)



1/ If a mapping overlaps a bad sector fail the request.

2/ Do not opportunistically report more dax-capable capacity than is
   requested when errors present.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
[vishal: fix a conflict with system RAM collision patches]
[vishal: add a 'size' parameter to ->direct_access]
[vishal: fix a conflict with DAX alignment check patches]
Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
parent 8b3db979
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 */
static long
axon_ram_direct_access(struct block_device *device, sector_t sector,
		       void __pmem **kaddr, pfn_t *pfn)
		       void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct axon_ram_bank *bank = device->bd_disk->private_data;
	loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
+1 −1
Original line number Diff line number Diff line
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,

#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
			void __pmem **kaddr, pfn_t *pfn)
			void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;
+9 −1
Original line number Diff line number Diff line
@@ -182,14 +182,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}

static long pmem_direct_access(struct block_device *bdev, sector_t sector,
		      void __pmem **kaddr, pfn_t *pfn)
		      void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	resource_size_t offset = sector * 512 + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return size;
	return pmem->size - pmem->pfn_pad - offset;
}

+2 −2
Original line number Diff line number Diff line
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
						struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
			 void __pmem **kaddr, pfn_t *pfn);
			 void __pmem **kaddr, pfn_t *pfn, long size);

static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

@@ -883,7 +883,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)

static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
			void __pmem **kaddr, pfn_t *pfn)
			void __pmem **kaddr, pfn_t *pfn, long size)
{
	struct dcssblk_dev_info *dev_info;
	unsigned long offset, dev_sz;
+1 −12
Original line number Diff line number Diff line
@@ -29,7 +29,6 @@
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/dax.h>
#include <linux/badblocks.h>
#include <asm/uaccess.h>
#include "internal.h"

@@ -501,7 +500,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
	sector += get_start_sect(bdev);
	if (sector % (PAGE_SIZE / 512))
		return -EINVAL;
	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
	avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
	if (!avail)
		return -ERANGE;
	if (avail > 0 && avail & ~PAGE_MASK)
@@ -561,7 +560,6 @@ EXPORT_SYMBOL_GPL(bdev_dax_supported);
 */
bool bdev_dax_capable(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
	};
@@ -577,15 +575,6 @@ bool bdev_dax_capable(struct block_device *bdev)
	if (bdev_direct_access(bdev, &dax) < 0)
		return false;

	/*
	 * If the device has known bad blocks, force all I/O through the
	 * driver / page cache.
	 *
	 * TODO: support finer grained dax error handling
	 */
	if (disk->bb && disk->bb->count)
		return false;

	return true;
}

Loading