
Commit b4726a9d authored by Daniel Stodden, committed by Konrad Rzeszutek Wilk

xen/blkback: Don't let in-flight requests defer pending ones.



Running RING_FINAL_CHECK_FOR_REQUESTS from make_response is a bad
idea: it means that in-flight I/O effectively blocks continued
batches, which kills throughput on frontends that unplug (or even
just notify) early and rightfully assume additional requests will be
picked up on time, not synchronously.

Signed-off-by: Daniel Stodden <daniel.stodden@citrix.com>
[v1: Rebased and fixed compile problems]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 2b727c63
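
For context on the frontend behavior the commit message describes: a
producer pushes requests with RING_PUSH_REQUESTS_AND_CHECK_NOTIFY,
which only asks for an event-channel notification when the new
requests cross the consumer's advertised req_event. A minimal
producer-side sketch, modeled loosely on blkfront (the function name
and the exact info fields are illustrative, not part of this patch):

static void push_one_request(struct blkfront_info *info)
{
	int notify;

	/* The request itself is assumed filled in at req_prod_pvt. */
	info->ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	/*
	 * notify stays 0 while the backend is still consuming, so the
	 * frontend rightly skips the interrupt and expects the backend
	 * to pick the request up on its own.
	 */
	if (notify)
		notify_remote_via_irq(info->irq);
}

This is exactly why a backend that only re-checks the ring from
make_response stalls: with no notification coming, nothing drains the
ring until some in-flight I/O happens to complete.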
+19 −17
@@ -458,7 +458,8 @@ static void end_block_io_op(struct bio *bio, int error)
  * (which has the sectors we want, number of them, grant references, etc),
  * and transmute  it to the block API to hand it over to the proper block disk.
  */
-static int do_block_io_op(struct xen_blkif *blkif)
+static int
+__do_block_io_op(struct xen_blkif *blkif)
 {
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
 	struct blkif_request req;
@@ -515,6 +516,23 @@ static int do_block_io_op(struct xen_blkif *blkif)
 	return more_to_do;
 }
 
+static int
+do_block_io_op(struct xen_blkif *blkif)
+{
+	union blkif_back_rings *blk_rings = &blkif->blk_rings;
+	int more_to_do;
+
+	do {
+		more_to_do = __do_block_io_op(blkif);
+		if (more_to_do)
+			break;
+
+		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+	} while (more_to_do);
+
+	return more_to_do;
+}
+
 /*
  * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
  * and call the 'submit_bio' to pass it to the underlying storage.
@@ -700,7 +718,6 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	struct blkif_response  resp;
 	unsigned long     flags;
 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
-	int more_to_do = 0;
 	int notify;
 
 	resp.id        = id;
@@ -727,22 +744,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
 	}
 	blk_rings->common.rsp_prod_pvt++;
 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
-	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
-		/*
-		 * Tail check for pending requests. Allows frontend to avoid
-		 * notifications if requests are already in flight (lower
-		 * overheads and promotes batching).
-		 */
-		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
-
-	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
-		more_to_do = 1;
-	}
-
 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
-
-	if (more_to_do)
-		blkif_notify_work(blkif);
 	if (notify)
 		notify_remote_via_irq(blkif->irq);
 }
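
The do/while loop in the new do_block_io_op terminates correctly
because of how the final check is defined. Roughly paraphrased from
the public io/ring.h (barrier details simplified), the macro re-arms
req_event first, so the frontend's next push will raise an interrupt,
and only then looks for requests again, closing the race with a
frontend that published work just as the backend went idle:

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work) do {			\
	(_work) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
	if (_work) break;						\
	/* Re-arm: ask the frontend to notify for the next request. */	\
	(_r)->sring->req_event = (_r)->req_cons + 1;			\
	mb();								\
	/* Re-check requests published while we were re-arming. */	\
	(_work) = RING_HAS_UNCONSUMED_REQUESTS(_r);			\
} while (0)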