Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d4b29fd7 authored by Dan Williams
Browse files

block: remove block_device_operations ->direct_access()



Now that all the producers and consumers of dax interfaces have been
converted to using dax_operations on a dax_device, remove the block
device direct_access enabling.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 2093f2e9
Loading
Loading
Loading
Loading
+4 −19
Original line number Diff line number Diff line
@@ -139,6 +139,10 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
	return BLK_QC_T_NONE;
}

/*
 * Post-conversion block ops: only module ownership remains; direct access
 * is now provided via dax_operations on the dax_device instead.
 */
static const struct block_device_operations axon_ram_devops = {
	.owner		= THIS_MODULE,
};

static long
__axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_pages,
		       void **kaddr, pfn_t *pfn)
@@ -150,25 +154,6 @@ __axon_ram_direct_access(struct axon_ram_bank *bank, pgoff_t pgoff, long nr_page
	return (bank->size - offset) / PAGE_SIZE;
}

/**
 * axon_ram_blk_direct_access - legacy ->direct_access() method for the
 *	block device (being removed by this commit)
 * @device, @sector, @kaddr, @pfn, @size: see block_device_operations method
 *
 * Translates the block layer's 512-byte-sector offset and byte count into
 * page units for __axon_ram_direct_access(), then scales the returned page
 * count back up to bytes for the block layer.
 */
static long
axon_ram_blk_direct_access(struct block_device *device, sector_t sector,
		       void **kaddr, pfn_t *pfn, long size)
{
	struct axon_ram_bank *bank = device->bd_disk->private_data;

	/* sector * 512 is the byte offset; divide by PAGE_SIZE for a page index */
	return __axon_ram_direct_access(bank, (sector * 512) / PAGE_SIZE,
			size / PAGE_SIZE, kaddr, pfn) * PAGE_SIZE;
}

/* Pre-conversion block ops exposing the legacy ->direct_access() hook. */
static const struct block_device_operations axon_ram_devops = {
	.owner		= THIS_MODULE,
	.direct_access	= axon_ram_blk_direct_access
};

static long
axon_ram_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
		       void **kaddr, pfn_t *pfn)
+0 −15
Original line number Diff line number Diff line
@@ -395,18 +395,6 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
	return 1;
}

/*
 * Legacy block-layer ->direct_access() shim (removed by this commit):
 * convert the 512-byte-sector offset and byte count into page-frame units
 * for __brd_direct_access(), then scale the returned page count back to
 * bytes.  Negative return values are error codes and pass through unscaled.
 */
static long brd_blk_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, pfn_t *pfn, long size)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	long nr_pages = __brd_direct_access(brd, PHYS_PFN(sector * 512),
			PHYS_PFN(size), kaddr, pfn);

	if (nr_pages < 0)
		return nr_pages;
	return nr_pages * PAGE_SIZE;
}

static long brd_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
@@ -418,14 +406,11 @@ static long brd_dax_direct_access(struct dax_device *dax_dev,
/* New-style DAX entry point: page-based direct access on the dax_device. */
static const struct dax_operations brd_dax_ops = {
	.direct_access = brd_dax_direct_access,
};
#else
#define brd_blk_direct_access NULL
#endif

/* brd block ops; the ->direct_access member is dropped by this commit. */
static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		brd_rw_page,
	.direct_access =	brd_blk_direct_access,
};

/*
+0 −13
Original line number Diff line number Diff line
@@ -957,18 +957,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
	return ret;
}

/*
 * Legacy block-layer ->direct_access() shim (removed by this commit):
 * forward to the mapped device's dax_device via dm_dax_direct_access(),
 * converting sectors to a page offset on the way in and the page count
 * back to bytes on the way out.  Negative returns are error codes.
 */
static long dm_blk_direct_access(struct block_device *bdev, sector_t sector,
		void **kaddr, pfn_t *pfn, long size)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dax_device *dax_dev = md->dax_dev;
	long nr_pages = size / PAGE_SIZE;

	/* sector / PAGE_SECTORS turns the sector offset into a page offset */
	nr_pages = dm_dax_direct_access(dax_dev, sector / PAGE_SECTORS,
			nr_pages, kaddr, pfn);
	return nr_pages < 0 ? nr_pages : nr_pages * PAGE_SIZE;
}

/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_PREFLUSH.
@@ -2823,7 +2811,6 @@ static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.direct_access = dm_blk_direct_access,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
+0 −10
Original line number Diff line number Diff line
@@ -220,19 +220,9 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

/*
 * Legacy block-layer ->direct_access() shim (removed by this commit):
 * convert the sector/byte arguments to page-frame units and delegate to
 * __pmem_direct_access().
 *
 * NOTE(review): unlike the axon_ram/brd/dcssblk shims in this same diff,
 * the returned page count is not multiplied by PAGE_SIZE here — confirm
 * the intended units against the block-layer caller.
 */
static long pmem_blk_direct_access(struct block_device *bdev, sector_t sector,
		void **kaddr, pfn_t *pfn, long size)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;

	return __pmem_direct_access(pmem, PHYS_PFN(sector * 512),
			PHYS_PFN(size), kaddr, pfn);
}

/* pmem block ops; the ->direct_access member is dropped by this commit. */
static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.direct_access =	pmem_blk_direct_access,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

+0 −16
Original line number Diff line number Diff line
@@ -31,8 +31,6 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
						struct bio *bio);
static long dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
			 void **kaddr, pfn_t *pfn, long size);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn);

@@ -43,7 +41,6 @@ static const struct block_device_operations dcssblk_devops = {
	.owner   	= THIS_MODULE,
	.open    	= dcssblk_open,
	.release 	= dcssblk_release,
	.direct_access 	= dcssblk_blk_direct_access,
};

static const struct dax_operations dcssblk_dax_ops = {
@@ -915,19 +912,6 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
	return (dev_sz - offset) / PAGE_SIZE;
}

/*
 * Legacy block-layer ->direct_access() shim (removed by this commit):
 * look up the dcssblk device info from the gendisk, convert the
 * sector/byte arguments to page-frame units for __dcssblk_direct_access(),
 * and scale the resulting page count back to bytes.
 */
static long
dcssblk_blk_direct_access(struct block_device *bdev, sector_t secnum,
			void **kaddr, pfn_t *pfn, long size)
{
	struct dcssblk_dev_info *dev_info;

	dev_info = bdev->bd_disk->private_data;
	if (!dev_info)
		return -ENODEV;
	return __dcssblk_direct_access(dev_info, PHYS_PFN(secnum * 512),
			PHYS_PFN(size), kaddr, pfn) * PAGE_SIZE;
}

static long
dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
Loading