Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 94b5eb28 authored by Jens Axboe
Browse files

block: fixup block IO unplug trace call



It was removed with the on-stack plugging, readd it and track the
depth of requests added when flushing the plug.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent d9c97833
Loading
Loading
Loading
Loading
+13 −2
Original line number Diff line number Diff line
@@ -2668,12 +2668,19 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
	return !(rqa->q <= rqb->q);
}

/*
 * Run @q after a plug flush has queued requests onto it.
 * @depth: number of requests added to @q by this flush — reported to the
 * block_unplug_io tracepoint so tracing no longer needs to read queue
 * counters itself.
 *
 * NOTE(review): caller is expected to hold q->queue_lock — confirm against
 * flush_plug_list, which takes the lock before invoking this.
 */
static void queue_unplugged(struct request_queue *q, unsigned int depth)
{
	trace_block_unplug_io(q, depth);
	__blk_run_queue(q, false);
}

static void flush_plug_list(struct blk_plug *plug)
{
	struct request_queue *q;
	unsigned long flags;
	struct request *rq;
	LIST_HEAD(list);
	unsigned int depth;

	BUG_ON(plug->magic != PLUG_MAGIC);

@@ -2688,6 +2695,7 @@ static void flush_plug_list(struct blk_plug *plug)
	}

	q = NULL;
	depth = 0;
	local_irq_save(flags);
	while (!list_empty(&list)) {
		rq = list_entry_rq(list.next);
@@ -2696,10 +2704,11 @@ static void flush_plug_list(struct blk_plug *plug)
		BUG_ON(!rq->q);
		if (rq->q != q) {
			if (q) {
-				__blk_run_queue(q, false);
+				queue_unplugged(q, depth);
				spin_unlock(q->queue_lock);
			}
			q = rq->q;
			depth = 0;
			spin_lock(q->queue_lock);
		}
		rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2711,10 +2720,12 @@ static void flush_plug_list(struct blk_plug *plug)
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);

		depth++;
	}

	if (q) {
-		__blk_run_queue(q, false);
+		queue_unplugged(q, depth);
		spin_unlock(q->queue_lock);
	}

+6 −5
Original line number Diff line number Diff line
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,

DECLARE_EVENT_CLASS(block_unplug,

-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),

-	TP_ARGS(q),
+	TP_ARGS(q, depth),

	TP_STRUCT__entry(
		__field( int,		nr_rq			)
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
	),

	TP_fast_assign(
-		__entry->nr_rq	= q->rq.count[READ] + q->rq.count[WRITE];
+		__entry->nr_rq = depth;
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),

@@ -421,15 +421,16 @@ DECLARE_EVENT_CLASS(block_unplug,
/**
 * block_unplug_io - release of operations requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
DEFINE_EVENT(block_unplug, block_unplug_io,

-	TP_PROTO(struct request_queue *q),
+	TP_PROTO(struct request_queue *q, unsigned int depth),

-	TP_ARGS(q)
+	TP_ARGS(q, depth)
);

/**
+3 −3
Original line number Diff line number Diff line
@@ -850,13 +850,13 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+				    unsigned int depth)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		__be64 rpdu = cpu_to_be64(depth);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);