Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 02e031cb authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: remove REQ_HARDBARRIER



REQ_HARDBARRIER is dead now, so remove the leftovers.  What's left
at this point is:

 - various checks inside the block layer.
 - sanity checks in bio based drivers.
 - now unused bio_empty_barrier helper.
 - Xen blockfront use of BLKIF_OP_WRITE_BARRIER - it's been dead for a while,
   but Xen really needs to sort out its barrier situation.
 - setting of ordered tags in uas - dead code copied from old scsi
   drivers.
 - scsi different retry for barriers - it's dead and should have been
   removed when flushes were converted to FS requests.
 - blktrace handling of barriers - removed.  Someone who knows blktrace
   better should add support for REQ_FLUSH and REQ_FUA, though.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 00e375e7
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
	int where = ELEVATOR_INSERT_SORT;
	int rw_flags;

	/* REQ_HARDBARRIER is no more */
	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	/*
	 * low level driver can indicate that it wants pages above a
	 * certain limit bounced to low memory (ie for highmem, or even
+2 −2
Original line number Diff line number Diff line
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
+0 −3
Original line number Diff line number Diff line
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
		BUG();
		bio_endio(bio, -ENXIO);
		return 0;
	} else if (bio->bi_rw & REQ_HARDBARRIER) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	} else if (bio->bi_io_vec == NULL) {
		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
		BUG();
+0 −6
Original line number Diff line number Diff line
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
	if (bio_rw(bio) == WRITE) {
		struct file *file = lo->lo_backing_file;

		/* REQ_HARDBARRIER is deprecated */
		if (bio->bi_rw & REQ_HARDBARRIER) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		if (bio->bi_rw & REQ_FLUSH) {
			ret = vfs_fsync(file, 0);
			if (unlikely(ret && ret != -EINVAL)) {
+0 −2
Original line number Diff line number Diff line
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (req->cmd_flags & REQ_HARDBARRIER)
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
Loading