Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3a5e02ce authored by Mike Christie, committed by Jens Axboe
Browse files

block, drivers: add REQ_OP_FLUSH operation



This adds a REQ_OP_FLUSH operation that is sent to request_fn
based drivers by the block layer's flush code, instead of
sending requests with the request->cmd_flags REQ_FLUSH bit set.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4e1b2d52
Loading
Loading
Loading
Loading
+3 −3
Original line number Original line Diff line number Diff line
@@ -73,9 +73,9 @@ doing:


	blk_queue_write_cache(sdkp->disk->queue, true, false);
	blk_queue_write_cache(sdkp->disk->queue, true, false);


and handle empty REQ_FLUSH requests in its prep_fn/request_fn.  Note that
and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn.  Note that
REQ_FLUSH requests with a payload are automatically turned into a sequence
REQ_FLUSH requests with a payload are automatically turned into a sequence
of an empty REQ_FLUSH request followed by the actual write by the block
of an empty REQ_OP_FLUSH request followed by the actual write by the block
layer.  For devices that also support the FUA bit the block layer needs
layer.  For devices that also support the FUA bit the block layer needs
to be told to pass through the REQ_FUA bit using:
to be told to pass through the REQ_FUA bit using:


@@ -83,4 +83,4 @@ to be told to pass through the REQ_FUA bit using:


and the driver must handle write requests that have the REQ_FUA bit set
and the driver must handle write requests that have the REQ_FUA bit set
in prep_fn/request_fn.  If the FUA bit is not natively supported the block
in prep_fn/request_fn.  If the FUA bit is not natively supported the block
layer turns it into an empty REQ_FLUSH request after the actual write.
layer turns it into an empty REQ_OP_FLUSH request after the actual write.
+1 −1
Original line number Original line Diff line number Diff line
@@ -1286,7 +1286,7 @@ static void do_ubd_request(struct request_queue *q)


		req = dev->request;
		req = dev->request;


		if (req->cmd_flags & REQ_FLUSH) {
		if (req_op(req) == REQ_OP_FLUSH) {
			io_req = kmalloc(sizeof(struct io_thread_req),
			io_req = kmalloc(sizeof(struct io_thread_req),
					 GFP_ATOMIC);
					 GFP_ATOMIC);
			if (io_req == NULL) {
			if (io_req == NULL) {
+2 −2
Original line number Original line Diff line number Diff line
@@ -29,7 +29,7 @@
 * The actual execution of flush is double buffered.  Whenever a request
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * needs to execute PRE or POSTFLUSH, it queues at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending are proceeded to the next
 * completes, all the requests which were pending are proceeded to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 * requests.
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
	}
	}


	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_type = REQ_TYPE_FS;
	flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->rq_disk = first_rq->rq_disk;
	flush_rq->end_io = flush_end_io;
	flush_rq->end_io = flush_end_io;


+2 −2
Original line number Original line Diff line number Diff line
@@ -542,7 +542,7 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
	pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;


	if (op_is_write(req_op(rq))) {
	if (op_is_write(req_op(rq))) {
		if (rq->cmd_flags & REQ_FLUSH)
		if (req_op(rq) == REQ_OP_FLUSH)
			ret = lo_req_flush(lo, rq);
			ret = lo_req_flush(lo, rq);
		else if (req_op(rq) == REQ_OP_DISCARD)
		else if (req_op(rq) == REQ_OP_DISCARD)
			ret = lo_discard(lo, rq, pos);
			ret = lo_discard(lo, rq, pos);
@@ -1659,7 +1659,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
	if (lo->lo_state != Lo_bound)
	if (lo->lo_state != Lo_bound)
		return -EIO;
		return -EIO;


	if (lo->use_dio && (!(cmd->rq->cmd_flags & REQ_FLUSH) ||
	if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
	    req_op(cmd->rq) == REQ_OP_DISCARD))
	    req_op(cmd->rq) == REQ_OP_DISCARD))
		cmd->use_aio = true;
		cmd->use_aio = true;
	else
	else
+1 −1
Original line number Original line Diff line number Diff line
@@ -284,7 +284,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
		type = NBD_CMD_DISC;
		type = NBD_CMD_DISC;
	else if (req_op(req) == REQ_OP_DISCARD)
	else if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
		type = NBD_CMD_TRIM;
	else if (req->cmd_flags & REQ_FLUSH)
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
		type = NBD_CMD_WRITE;
Loading