Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5a7bbad2 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: remove support for bio remapping from ->make_request



There is very little benefit in letting a ->make_request
instance update the bio's device and sector and loop around it in
__generic_make_request when we can achieve the same by calling
generic_make_request from the driver and letting the loop in
generic_make_request handle it.

Note that various drivers previously used the return value from ->make_request
to signal errors by returning non-zero values.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent c20e8de2
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ struct nfhd_device {
	struct gendisk *disk;
};

static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
{
	struct nfhd_device *dev = queue->queuedata;
	struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
		sec += len;
	}
	bio_endio(bio, 0);
	return 0;
}

static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+2 −6
Original line number Diff line number Diff line
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
 * axon_ram_make_request - make_request() method for block device
 * @queue, @bio: see blk_queue_make_request()
 */
static int
static void
axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
	struct bio_vec *vec;
	unsigned int transfered;
	unsigned short idx;
	int rc = 0;

	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
	phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
	bio_for_each_segment(vec, bio, idx) {
		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
			bio_io_error(bio);
			rc = -ERANGE;
			break;
			return;
		}

		user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
		transfered += vec->bv_len;
	}
	bio_endio(bio, 0);

	return rc;
}

/**
+62 −91
Original line number Diff line number Diff line
@@ -1211,7 +1211,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
	blk_rq_bio_prep(req->q, req, bio);
}

int blk_queue_bio(struct request_queue *q, struct bio *bio)
void blk_queue_bio(struct request_queue *q, struct bio *bio)
{
	const bool sync = !!(bio->bi_rw & REQ_SYNC);
	struct blk_plug *plug;
@@ -1236,7 +1236,7 @@ int blk_queue_bio(struct request_queue *q, struct bio *bio)
	 * any locks.
	 */
	if (attempt_plug_merge(current, q, bio))
		goto out;
		return;

	spin_lock_irq(q->queue_lock);

@@ -1312,8 +1312,6 @@ get_rq:
out_unlock:
		spin_unlock_irq(q->queue_lock);
	}
out:
	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */

@@ -1441,30 +1439,16 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
static inline void __generic_make_request(struct bio *bio)
{
	struct request_queue *q;
	sector_t old_sector;
	int ret, nr_sectors = bio_sectors(bio);
	dev_t old_dev;
	int nr_sectors = bio_sectors(bio);
	int err = -EIO;
	char b[BDEVNAME_SIZE];
	struct hd_struct *part;

	might_sleep();

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

	/*
	 * Resolve the mapping until finished. (drivers are
	 * still free to implement/resolve their own stacking
	 * by explicitly returning 0)
	 *
	 * NOTE: we don't repeat the blk_size check for each new device.
	 * Stacking drivers are expected to know what they are doing.
	 */
	old_sector = -1;
	old_dev = 0;
	do {
		char b[BDEVNAME_SIZE];
		struct hd_struct *part;

	q = bdev_get_queue(bio->bi_bdev);
	if (unlikely(!q)) {
		printk(KERN_ERR
@@ -1502,12 +1486,6 @@ static inline void __generic_make_request(struct bio *bio)
	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
		goto end_io;

		if (old_sector != -1)
			trace_block_bio_remap(q, bio, old_dev, old_sector);

		old_sector = bio->bi_sector;
		old_dev = bio->bi_bdev->bd_dev;

	if (bio_check_eod(bio, nr_sectors))
		goto end_io;

@@ -1535,18 +1513,11 @@ static inline void __generic_make_request(struct bio *bio)
	if (blk_throtl_bio(q, &bio))
		goto end_io;

		/*
		 * If bio = NULL, bio has been throttled and will be submitted
		 * later.
		 */
	/* if bio = NULL, bio has been throttled and will be submitted later. */
	if (!bio)
			break;

		return;
	trace_block_bio_queue(q, bio);

		ret = q->make_request_fn(q, bio);
	} while (ret);

	q->make_request_fn(q, bio);
	return;

end_io:
+6 −8
Original line number Diff line number Diff line
@@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
	return 0;
}

static int
static void
aoeblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct sk_buff_head queue;
@@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
	if (bio == NULL) {
		printk(KERN_ERR "aoe: bio is NULL\n");
		BUG();
		return 0;
		return;
	}
	d = bio->bi_bdev->bd_disk->private_data;
	if (d == NULL) {
		printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
		return;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
		return;
	}
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: buf allocation failure\n");
		bio_endio(bio, -ENOMEM);
		return 0;
		return;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
@@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -ENXIO);
		return 0;
		return;
	}

	list_add_tail(&buf->bufs, &d->bufq);
@@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)

	spin_unlock_irqrestore(&d->lock, flags);
	aoenet_xmit(&queue);

	return 0;
}

static int
+1 −3
Original line number Diff line number Diff line
@@ -323,7 +323,7 @@ out:
	return err;
}

static int brd_make_request(struct request_queue *q, struct bio *bio)
static void brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)

out:
	bio_endio(bio, err);

	return 0;
}

#ifdef CONFIG_BLK_DEV_XIP
Loading