
Commit d733ed6c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: make unplug timer trace event correspond to the schedule() unplug
  block: let io_schedule() flush the plug inline
parents 08150c53 49cac01e
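For context: both patches tune the on-stack request plugging introduced earlier in this series. A minimal sketch of the caller-side pattern, assuming the 2.6.39-era API (submit_pending_bios() is a hypothetical helper):

/* Sketch only: on-stack request plugging as a submitter uses it. */
void my_submit_path(void)
{
	struct blk_plug plug;

	blk_start_plug(&plug);		/* requests collect on current->plug */
	submit_pending_bios();		/* hypothetical helper that issues bios */
	blk_finish_plug(&plug);		/* explicit flush of the plugged requests */
}

If the task blocks before the explicit flush, schedule() flushes the pending list on its behalf; after this merge that implicit flush is deferred to kblockd via blk_schedule_flush_plug(), while io_schedule() still flushes inline through blk_flush_plug().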
block/blk-core.c +12 −6
@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
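The from_schedule flag ends up in __blk_run_queue(), which either runs the driver's request_fn on the current stack or punts the work to the kblockd workqueue. A simplified sketch of the 2.6.39-era helper, not the verbatim implementation:

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);	/* dispatch inline, on this stack */
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}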

include/linux/blkdev.h +13 −0
@@ -871,6 +871,14 @@ static inline void blk_flush_plug(struct task_struct *tsk)
 {
	struct blk_plug *plug = tsk->plug;
 
	if (plug)
		blk_flush_plug_list(plug, false);
 }
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
+		blk_flush_plug_list(plug, true);
+}
@@ -1317,6 +1325,11 @@ static inline void blk_flush_plug(struct task_struct *task)
 {
 }
 
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
	return false;
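The second hunk adds an empty stub for !CONFIG_BLOCK builds, so callers need no #ifdef guards. A hypothetical caller illustrating the pattern:

/* Hypothetical caller: compiles whether or not CONFIG_BLOCK is set. */
static void before_blocking(struct task_struct *tsk)
{
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);	/* empty stub when CONFIG_BLOCK=n */
}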
include/trace/events/block.h +7 −6
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth),
+	TP_ARGS(q, depth, explicit),
 
 	TP_STRUCT__entry(
 		__field( int,		nr_rq			)
@@ -419,18 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth)
+	TP_ARGS(q, depth, explicit)
 );
 
 /**
kernel/sched.c +1 −1
@@ -4118,7 +4118,7 @@ need_resched:
 			 */
 			if (blk_needs_flush_plug(prev)) {
 				raw_spin_unlock(&rq->lock);
-				blk_flush_plug(prev);
+				blk_schedule_flush_plug(prev);
 				raw_spin_lock(&rq->lock);
 			}
 		}
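schedule() now defers the flush to kblockd, while io_schedule(), per the second patch in this merge, keeps flushing inline since the task is about to wait on I/O anyway. A simplified sketch of the io_schedule() shape in this era, not the verbatim function:

void __sched io_schedule(void)
{
	struct rq *rq = raw_rq();

	delayacct_blkio_start();
	atomic_inc(&rq->nr_iowait);
	blk_flush_plug(current);	/* inline flush: we are blocking on I/O */
	current->in_iowait = 1;
	schedule();			/* the generic path above defers instead */
	current->in_iowait = 0;
	atomic_dec(&rq->nr_iowait);
	delayacct_blkio_end();
}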
kernel/trace/blktrace.c +12 −6
@@ -850,16 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-				    unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				    unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
+
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }

@@ -1002,7 +1007,7 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
@@ -1017,7 +1022,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1332,6 +1337,7 @@ static const struct {
 	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
 	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
 	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
+	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
 	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
 	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
 	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
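Taken together, the flag-to-action mapping after this merge, summarized from the hunks above:

/*
 * Summary of the new flag plumbing (drawn from the hunks above):
 *
 * blk_flush_plug(tsk)				explicit flush
 *   -> blk_flush_plug_list(plug, false)
 *   -> trace_block_unplug(q, depth, true)	-> "U"  unplug_io
 *
 * blk_schedule_flush_plug(tsk)			flush from schedule()
 *   -> blk_flush_plug_list(plug, true)
 *   -> trace_block_unplug(q, depth, false)	-> "UT" unplug_timer
 */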