Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd4c133f authored by Tejun Heo, committed by Jens Axboe
Browse files

block: rename barrier/ordered to flush



With ordering requirements dropped, barrier and ordered are misnomers.
Now all block layer does is sequencing FLUSH and FUA.  Rename them to
flush.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 8839a0e0
Loading
Loading
Loading
Loading
+10 −11
Original line number Original line Diff line number Diff line
@@ -136,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
{
{
	struct request_queue *q = rq->q;
	struct request_queue *q = rq->q;


	if (&q->bar_rq != rq) {
	if (&q->flush_rq != rq) {
		if (error)
		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
@@ -160,13 +160,12 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
		if (bio->bi_size == 0)
		if (bio->bi_size == 0)
			bio_endio(bio, error);
			bio_endio(bio, error);
	} else {
	} else {

		/*
		/*
		 * Okay, this is the barrier request in progress, just
		 * Okay, this is the sequenced flush request in
		 * record the error;
		 * progress, just record the error;
		 */
		 */
		if (error && !q->orderr)
		if (error && !q->flush_err)
			q->orderr = error;
			q->flush_err = error;
	}
	}
}
}


@@ -520,7 +519,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	init_timer(&q->unplug_timer);
	init_timer(&q->unplug_timer);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->timeout_list);
	INIT_LIST_HEAD(&q->pending_barriers);
	INIT_LIST_HEAD(&q->pending_flushes);
	INIT_WORK(&q->unplug_work, blk_unplug_work);
	INIT_WORK(&q->unplug_work, blk_unplug_work);


	kobject_init(&q->kobj, &blk_queue_ktype);
	kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1764,11 +1763,11 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
static void blk_account_io_done(struct request *req)
static void blk_account_io_done(struct request *req)
{
{
	/*
	/*
	 * Account IO completion.  bar_rq isn't accounted as a normal
	 * Account IO completion.  flush_rq isn't accounted as a
	 * IO on queueing nor completion.  Accounting the containing
	 * normal IO on queueing nor completion.  Accounting the
	 * request is enough.
	 * containing request is enough.
	 */
	 */
	if (blk_do_io_stat(req) && req != &req->q->bar_rq) {
	if (blk_do_io_stat(req) && req != &req->q->flush_rq) {
		unsigned long duration = jiffies - req->start_time;
		unsigned long duration = jiffies - req->start_time;
		const int rw = rq_data_dir(req);
		const int rw = rq_data_dir(req);
		struct hd_struct *part;
		struct hd_struct *part;
+48 −50
Original line number Original line Diff line number Diff line
@@ -9,41 +9,38 @@


#include "blk.h"
#include "blk.h"


static struct request *queue_next_ordseq(struct request_queue *q);
static struct request *queue_next_fseq(struct request_queue *q);


/*
unsigned blk_flush_cur_seq(struct request_queue *q)
 * Cache flushing for ordered writes handling
 */
unsigned blk_ordered_cur_seq(struct request_queue *q)
{
{
	if (!q->ordseq)
	if (!q->flush_seq)
		return 0;
		return 0;
	return 1 << ffz(q->ordseq);
	return 1 << ffz(q->flush_seq);
}
}


static struct request *blk_ordered_complete_seq(struct request_queue *q,
static struct request *blk_flush_complete_seq(struct request_queue *q,
					      unsigned seq, int error)
					      unsigned seq, int error)
{
{
	struct request *next_rq = NULL;
	struct request *next_rq = NULL;


	if (error && !q->orderr)
	if (error && !q->flush_err)
		q->orderr = error;
		q->flush_err = error;


	BUG_ON(q->ordseq & seq);
	BUG_ON(q->flush_seq & seq);
	q->ordseq |= seq;
	q->flush_seq |= seq;


	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) {
	if (blk_flush_cur_seq(q) != QUEUE_FSEQ_DONE) {
		/* not complete yet, queue the next ordered sequence */
		/* not complete yet, queue the next flush sequence */
		next_rq = queue_next_ordseq(q);
		next_rq = queue_next_fseq(q);
	} else {
	} else {
		/* complete this barrier request */
		/* complete this flush request */
		__blk_end_request_all(q->orig_bar_rq, q->orderr);
		__blk_end_request_all(q->orig_flush_rq, q->flush_err);
		q->orig_bar_rq = NULL;
		q->orig_flush_rq = NULL;
		q->ordseq = 0;
		q->flush_seq = 0;


		/* dispatch the next barrier if there's one */
		/* dispatch the next flush if there's one */
		if (!list_empty(&q->pending_barriers)) {
		if (!list_empty(&q->pending_flushes)) {
			next_rq = list_entry_rq(q->pending_barriers.next);
			next_rq = list_entry_rq(q->pending_flushes.next);
			list_move(&next_rq->queuelist, &q->queue_head);
			list_move(&next_rq->queuelist, &q->queue_head);
		}
		}
	}
	}
@@ -53,19 +50,19 @@ static struct request *blk_ordered_complete_seq(struct request_queue *q,
static void pre_flush_end_io(struct request *rq, int error)
static void pre_flush_end_io(struct request *rq, int error)
{
{
	elv_completed_request(rq->q, rq);
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_PREFLUSH, error);
}
}


static void bar_end_io(struct request *rq, int error)
static void flush_data_end_io(struct request *rq, int error)
{
{
	elv_completed_request(rq->q, rq);
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_DATA, error);
}
}


static void post_flush_end_io(struct request *rq, int error)
static void post_flush_end_io(struct request *rq, int error)
{
{
	elv_completed_request(rq->q, rq);
	elv_completed_request(rq->q, rq);
	blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
	blk_flush_complete_seq(rq->q, QUEUE_FSEQ_POSTFLUSH, error);
}
}


static void queue_flush(struct request_queue *q, struct request *rq,
static void queue_flush(struct request_queue *q, struct request *rq,
@@ -74,34 +71,34 @@ static void queue_flush(struct request_queue *q, struct request *rq,
	blk_rq_init(q, rq);
	blk_rq_init(q, rq);
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_type = REQ_TYPE_FS;
	rq->cmd_flags = REQ_FLUSH;
	rq->cmd_flags = REQ_FLUSH;
	rq->rq_disk = q->orig_bar_rq->rq_disk;
	rq->rq_disk = q->orig_flush_rq->rq_disk;
	rq->end_io = end_io;
	rq->end_io = end_io;


	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
}


static struct request *queue_next_ordseq(struct request_queue *q)
static struct request *queue_next_fseq(struct request_queue *q)
{
{
	struct request *rq = &q->bar_rq;
	struct request *rq = &q->flush_rq;


	switch (blk_ordered_cur_seq(q)) {
	switch (blk_flush_cur_seq(q)) {
	case QUEUE_ORDSEQ_PREFLUSH:
	case QUEUE_FSEQ_PREFLUSH:
		queue_flush(q, rq, pre_flush_end_io);
		queue_flush(q, rq, pre_flush_end_io);
		break;
		break;


	case QUEUE_ORDSEQ_BAR:
	case QUEUE_FSEQ_DATA:
		/* initialize proxy request and queue it */
		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		blk_rq_init(q, rq);
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		init_request_from_bio(rq, q->orig_flush_rq->bio);
		rq->cmd_flags &= ~REQ_HARDBARRIER;
		rq->cmd_flags &= ~REQ_HARDBARRIER;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
			rq->cmd_flags |= REQ_FUA;
		rq->end_io = bar_end_io;
		rq->end_io = flush_data_end_io;


		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
		break;
		break;


	case QUEUE_ORDSEQ_POSTFLUSH:
	case QUEUE_FSEQ_POSTFLUSH:
		queue_flush(q, rq, post_flush_end_io);
		queue_flush(q, rq, post_flush_end_io);
		break;
		break;


@@ -111,19 +108,20 @@ static struct request *queue_next_ordseq(struct request_queue *q)
	return rq;
	return rq;
}
}


struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
struct request *blk_do_flush(struct request_queue *q, struct request *rq)
{
{
	unsigned skip = 0;
	unsigned skip = 0;


	if (!(rq->cmd_flags & REQ_HARDBARRIER))
	if (!(rq->cmd_flags & REQ_HARDBARRIER))
		return rq;
		return rq;


	if (q->ordseq) {
	if (q->flush_seq) {
		/*
		/*
		 * Barrier is already in progress and they can't be
		 * Sequenced flush is already in progress and they
		 * processed in parallel.  Queue for later processing.
		 * can't be processed in parallel.  Queue for later
		 * processing.
		 */
		 */
		list_move_tail(&rq->queuelist, &q->pending_barriers);
		list_move_tail(&rq->queuelist, &q->pending_flushes);
		return NULL;
		return NULL;
	}
	}


@@ -138,11 +136,11 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
	}
	}


	/*
	/*
	 * Start a new ordered sequence
	 * Start a new flush sequence
	 */
	 */
	q->orderr = 0;
	q->flush_err = 0;
	q->ordered = q->next_ordered;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;
	q->flush_seq |= QUEUE_FSEQ_STARTED;


	/*
	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * For an empty barrier, there's no actual BAR request, which
@@ -154,19 +152,19 @@ struct request *blk_do_ordered(struct request_queue *q, struct request *rq)


	/* stash away the original request */
	/* stash away the original request */
	blk_dequeue_request(rq);
	blk_dequeue_request(rq);
	q->orig_bar_rq = rq;
	q->orig_flush_rq = rq;


	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
	if (!(q->ordered & QUEUE_ORDERED_DO_PREFLUSH))
		skip |= QUEUE_ORDSEQ_PREFLUSH;
		skip |= QUEUE_FSEQ_PREFLUSH;


	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
	if (!(q->ordered & QUEUE_ORDERED_DO_BAR))
		skip |= QUEUE_ORDSEQ_BAR;
		skip |= QUEUE_FSEQ_DATA;


	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
	if (!(q->ordered & QUEUE_ORDERED_DO_POSTFLUSH))
		skip |= QUEUE_ORDSEQ_POSTFLUSH;
		skip |= QUEUE_FSEQ_POSTFLUSH;


	/* complete skipped sequences and return the first sequence */
	/* complete skipped sequences and return the first sequence */
	return blk_ordered_complete_seq(q, skip, 0);
	return blk_flush_complete_seq(q, skip, 0);
}
}


static void bio_end_empty_barrier(struct bio *bio, int err)
static void bio_end_empty_barrier(struct bio *bio, int err)
+2 −2
Original line number Original line Diff line number Diff line
@@ -51,7 +51,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 */
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))


struct request *blk_do_ordered(struct request_queue *q, struct request *rq);
struct request *blk_do_flush(struct request_queue *q, struct request *rq);


static inline struct request *__elv_next_request(struct request_queue *q)
static inline struct request *__elv_next_request(struct request_queue *q)
{
{
@@ -60,7 +60,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
	while (1) {
	while (1) {
		while (!list_empty(&q->queue_head)) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			rq = list_entry_rq(q->queue_head.next);
			rq = blk_do_ordered(q, rq);
			rq = blk_do_flush(q, rq);
			if (rq)
			if (rq)
				return rq;
				return rq;
		}
		}
+12 −12
Original line number Original line Diff line number Diff line
@@ -357,13 +357,13 @@ struct request_queue
	/*
	/*
	 * for flush operations
	 * for flush operations
	 */
	 */
	unsigned int		ordered, next_ordered;
	unsigned int		flush_flags;
	unsigned int		flush_flags;

	unsigned int		flush_seq;
	unsigned int		ordered, next_ordered, ordseq;
	int			flush_err;
	int			orderr;
	struct request		flush_rq;
	struct request		bar_rq;
	struct request		*orig_flush_rq;
	struct request		*orig_bar_rq;
	struct list_head	pending_flushes;
	struct list_head	pending_barriers;


	struct mutex		sysfs_lock;
	struct mutex		sysfs_lock;


@@ -490,13 +490,13 @@ enum {
					  QUEUE_ORDERED_DO_FUA,
					  QUEUE_ORDERED_DO_FUA,


	/*
	/*
	 * Ordered operation sequence
	 * FLUSH/FUA sequences.
	 */
	 */
	QUEUE_ORDSEQ_STARTED	= (1 << 0), /* flushing in progress */
	QUEUE_FSEQ_STARTED	= (1 << 0), /* flushing in progress */
	QUEUE_ORDSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
	QUEUE_FSEQ_PREFLUSH	= (1 << 1), /* pre-flushing in progress */
	QUEUE_ORDSEQ_BAR	= (1 << 2), /* barrier write in progress */
	QUEUE_FSEQ_DATA		= (1 << 2), /* data write in progress */
	QUEUE_ORDSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
	QUEUE_FSEQ_POSTFLUSH	= (1 << 3), /* post-flushing in progress */
	QUEUE_ORDSEQ_DONE	= (1 << 4),
	QUEUE_FSEQ_DONE		= (1 << 4),
};
};


#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)