
Commit d8dd0b6d authored by Linus Torvalds

Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block

Pull block core bits from Jens Axboe:
 "It's a nice and quiet round this time, since most of the tricky stuff
  has been pushed to 3.5 to give it more time to mature.  After a few
  hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
  slow round.

  Really minor stuff in here, the only real functional change is making
  the auto-unplug threshold a per-queue entity.  The threshold is set so
  that it's low enough that we don't hold off IO for too long, but still
  big enough to get a nice benefit from the batched insert (and hence
  queue lock cost reduction).  For raid configurations, this currently
  breaks down."

* 'for-3.4/core' of git://git.kernel.dk/linux-block:
  block: make auto block plug flush threshold per-disk based
  Documentation: Add sysfs ABI change for cfq's target latency.
  block: Make cfq_target_latency tunable through sysfs.
  block: use lockdep_assert_held for queue locking
  block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
parents 2d59dcfb 1b2e19f1
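
For background on the "auto block plug flush threshold" entry: the attempt_plug_merge() hunk further down makes *request_count count only the plugged requests that target the same queue as the incoming bio (elsewhere, in blk_queue_bio(), that count is compared against BLK_MAX_REQUEST_COUNT to decide when to flush the plug), so the threshold is effectively applied per disk rather than across the whole plug list. Below is a minimal, self-contained userspace sketch of that counting idea; every identifier in it (toy_request, should_flush_plug, TOY_MAX_REQUEST_COUNT, ...) is invented for illustration and none of it is kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_MAX_REQUEST_COUNT 16	/* stand-in for the kernel's flush threshold */

struct toy_request {
	int queue_id;			/* which disk/queue the request targets */
	struct toy_request *next;	/* the plug "list" */
};

/* Count only the plugged requests destined for queue_id. */
static unsigned int plugged_for_queue(const struct toy_request *plug, int queue_id)
{
	unsigned int count = 0;

	for (; plug; plug = plug->next)
		if (plug->queue_id == queue_id)
			count++;
	return count;
}

/* Flush once a single disk has accumulated enough batched requests. */
static bool should_flush_plug(const struct toy_request *plug, int queue_id)
{
	return plugged_for_queue(plug, queue_id) >= TOY_MAX_REQUEST_COUNT;
}

int main(void)
{
	struct toy_request a = { .queue_id = 0, .next = NULL };
	struct toy_request b = { .queue_id = 1, .next = &a };

	/* Only one plugged request targets queue 0, so no flush yet. */
	printf("flush queue 0? %d\n", should_flush_plug(&b, 0));
	return 0;
}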
+8 −0
What:		/sys/block/<device>/iosched/target_latency
Date:		March 2012
Contact:	Tao Ma <boyu.mt@taobao.com>
Description:
		The /sys/block/<device>/iosched/target_latency only exists
		when the user sets cfq to /sys/block/<device>/scheduler.
		It contains an estimated latency time for the cfq. cfq will
		use it to calculate the time slice used for every task.
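
A brief note on semantics, inferred from the cfq-iosched hunks further down rather than stated in this ABI entry: the value lives in cfqd->cfq_target_latency, is seeded from the old compile-time default (300 ms at the time, if memory serves), appears to be expressed in milliseconds (the show/store entries use the converting variant), and a group's time slice is derived from it in cfq_group_slice() as target_latency * group_weight / total_weight. The tiny self-contained sketch below only mirrors that arithmetic in userspace; the function name and the example weights are invented.

#include <stdio.h>

/* slice = target_latency * group_weight / total_weight, as in cfq_group_slice() */
static unsigned int group_slice_ms(unsigned int target_latency_ms,
				   unsigned int group_weight,
				   unsigned int total_weight)
{
	return target_latency_ms * group_weight / total_weight;
}

int main(void)
{
	/* Two groups with weights 100 and 300 sharing a 300 ms target. */
	printf("group A slice: %u ms\n", group_slice_ms(300, 100, 400));
	printf("group B slice: %u ms\n", group_slice_ms(300, 300, 400));
	return 0;
}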
+3 −2
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	if (!q)
		return NULL;

-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail_q;

@@ -1277,6 +1277,7 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
		int el_ret;

-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
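
Two separate changes land in the file above. The second (the if (rq->q == q) hunk) is the per-disk threshold accounting sketched earlier; the first makes blk_alloc_queue_node() pass its gfp_mask argument through to ida_simple_get() instead of hard-coding GFP_KERNEL, so the queue-id allocation honours whatever allocation constraints the caller asked for. A rough userspace analogue of that "plumb the caller's flags all the way down" pattern, with every name below invented for illustration:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int alloc_flags_t;	/* stand-in for gfp_t */
#define ALLOC_DEFAULT	0x1u
#define ALLOC_NOWAIT	0x2u

struct toy_queue {
	int id;
};

/* Hypothetical low-level allocator that acts on the caller's flags. */
static void *flag_alloc(size_t size, alloc_flags_t flags)
{
	/* A real allocator would behave differently for ALLOC_NOWAIT, etc. */
	(void)flags;
	return calloc(1, size);
}

static struct toy_queue *toy_queue_alloc(alloc_flags_t flags)
{
	/* Pass the caller's flags through; don't hard-code ALLOC_DEFAULT here. */
	return flag_alloc(sizeof(struct toy_queue), flags);
}

int main(void)
{
	struct toy_queue *q = toy_queue_alloc(ALLOC_NOWAIT);

	printf("allocated: %s\n", q ? "yes" : "no");
	free(q);
	return 0;
}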
+1 −1
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
	struct bio_list bl;
	struct bio *bio;

-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

	bio_list_init(&bl);

+8 −2
@@ -295,6 +295,7 @@ struct cfq_data {
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
+	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
@@ -2271,7 +2272,8 @@ new_workload:
		 * to have higher weight. A more accurate thing would be to
		 * calculate system wide asnc/sync ratio.
		 */
-		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
	__ATTR_NULL
};

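Taken together, the hunks above add the cfq_target_latency field, seed it from the old built-in default, and wire it into the elevator's sysfs attributes through the SHOW_FUNCTION/STORE_FUNCTION/CFQ_ATTR boilerplate, which is what makes the target_latency file from the ABI entry appear. For completeness, a small self-contained userspace helper for poking the tunable; the /sys/block/sda/queue/iosched/target_latency path is an assumption about a typical setup (the file only exists while cfq is the active scheduler), and whether raising the value actually helps a given workload is something to measure, not take from this sketch.

#include <stdio.h>

static int read_target_latency(const char *path, unsigned int *value)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (fscanf(f, "%u", value) != 1) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

static int write_target_latency(const char *path, unsigned int value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%u\n", value);
	return fclose(f);
}

int main(void)
{
	/* Assumed path; adjust the device name for your system. */
	const char *path = "/sys/block/sda/queue/iosched/target_latency";
	unsigned int ms;

	if (read_target_latency(path, &ms) == 0)
		printf("current target_latency: %u ms\n", ms);

	/* Raise the target (believed to be in milliseconds) to favour throughput. */
	if (write_target_latency(path, 600) != 0)
		perror("write_target_latency");
	return 0;
}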
+7 −11
@@ -426,14 +426,10 @@ struct request_queue {
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
{
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}
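
The blkdev.h and blk-throttle.c hunks replace the old queue_is_locked() checks: spin_is_locked() can only report that somebody holds the lock (and had to be stubbed out for !CONFIG_SMP, where spinlocks compile away), whereas lockdep_assert_held() complains when the current context does not hold it and compiles to nothing without lockdep. A rough userspace analogue of that "assert that the caller holds the lock" pattern, using pthreads and invented names throughout:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct locked_queue {
	pthread_mutex_t lock;
	pthread_t owner;	/* tracked by hand, roughly what lockdep does for us */
	int owned;
	unsigned long flags;
};

static void locked_queue_lock(struct locked_queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self();
	q->owned = 1;
}

static void locked_queue_unlock(struct locked_queue *q)
{
	q->owned = 0;
	pthread_mutex_unlock(&q->lock);
}

/* Spiritual sibling of queue_lockdep_assert_held(): complain unless the
 * calling thread itself holds the lock. */
static void locked_queue_assert_held(struct locked_queue *q)
{
	assert(q->owned && pthread_equal(q->owner, pthread_self()));
}

/* Like queue_flag_set(): callers are required to hold the lock. */
static void locked_queue_flag_set(struct locked_queue *q, unsigned long flag)
{
	locked_queue_assert_held(q);
	q->flags |= flag;
}

int main(void)
{
	struct locked_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	locked_queue_lock(&q);
	locked_queue_flag_set(&q, 1UL << 0);	/* fine: this thread holds the lock */
	locked_queue_unlock(&q);
	printf("flags: %#lx\n", q.flags);
	return 0;
}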