Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a3b05e8f authored by Jens Axboe's avatar Jens Axboe Committed by Jens Axboe
Browse files

[PATCH] Kill various deprecated/unused block layer defines/functions



Signed-off-by: default avatarJens Axboe <axboe@suse.de>
parent 1ea25ecb
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -1229,7 +1229,6 @@ static inline void complete_buffers(struct bio *bio, int status)
		int nr_sectors = bio_sectors(bio);

		bio->bi_next = NULL;
		blk_finished_io(len);
		bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
		bio = xbh;
	}
+0 −1
Original line number Diff line number Diff line
@@ -989,7 +989,6 @@ static inline void complete_buffers(struct bio *bio, int ok)
		xbh = bio->bi_next;
		bio->bi_next = NULL;
		
		blk_finished_io(nr_sectors);
		bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);

		bio = xbh;
+0 −29
Original line number Diff line number Diff line
@@ -578,12 +578,6 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
/*
 * A request is a merge candidate only when no RQ_NOMERGE_FLAGS bit is
 * set in its cmd_flags and it is a filesystem request.
 */
#define rq_mergeable(rq)	\
	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))

/*
 * noop, requests are automagically marked as active/inactive by I/O
 * scheduler -- see elv_next_request
 *
 * Deprecated: the macro expands to nothing and exists only for source
 * compatibility with old drivers.
 */
#define blk_queue_headactive(q, head_active)

/*
 * q->prep_rq_fn return values
 */
@@ -621,11 +615,6 @@ static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

struct sec_size {
	unsigned block_size;		/* sector/block size in bytes */
	unsigned block_size_bits;	/* presumably log2(block_size), i.e.
					 * block_size == 1 << block_size_bits
					 * -- TODO confirm with users */
};

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void register_disk(struct gendisk *dev);
@@ -690,16 +679,6 @@ extern void end_that_request_last(struct request *, int);
extern void end_request(struct request *req, int uptodate);
extern void blk_complete_request(struct request *);

/*
 * rq_all_done - tell whether @nr_bytes completes the whole of @rq.
 *
 * Filesystem requests are measured against the hard sector count
 * (converted to bytes), packet-command requests against data_len.
 * Any other request type is never reported as fully done here.
 */
static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
{
	if (blk_fs_request(rq))
		return nr_bytes >= rq->hard_nr_sectors << 9;
	if (blk_pc_request(rq))
		return nr_bytes >= rq->data_len;

	return 0;
}

/*
 * end_that_request_first/chunk() takes an uptodate argument. we account
 * any value <= as an io error. 0 means -EIO for compatibility reasons,
@@ -807,14 +786,6 @@ static inline int queue_dma_alignment(request_queue_t *q)
	return retval;
}

static inline int bdev_dma_aligment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

/*
 * Deprecated I/O accounting hooks: both expand to nothing and exist
 * only so old callers keep compiling.
 */
#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
+0 −1
Original line number Diff line number Diff line
@@ -79,7 +79,6 @@ extern int dir_notify_enable;
#define WRITE 1
#define READA 2		/* read-ahead  - don't block if no resources */
#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
#define SPECIAL 4	/* For non-blockdevice requests in request queue */
#define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
#define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))