Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fb4b10ab authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: kill loop_mutex
  blktrace: Remove blk_fill_rwbs_rq.
  block: blk-flush shouldn't call directly into q->request_fn() __blk_run_queue()
  block: add @force_kblockd to __blk_run_queue()
  block: fix kernel-doc format for blkdev_issue_zeroout
  blk-throttle: Do not use kblockd workqueue for throtl work
parents 83360269 fd51469f
Loading
Loading
Loading
Loading
+6 −12
Original line number Diff line number Diff line
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
	WARN_ON(!irqs_disabled());

	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
}
EXPORT_SYMBOL(blk_start_queue);

@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
/**
 * __blk_run_queue - run a single device queue
 * @q:	The queue to run
 * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
 *
 * Description:
 *    See @blk_run_queue. This variant must be called with the queue lock
 *    held and interrupts disabled.
 *
 */
void __blk_run_queue(struct request_queue *q)
void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	blk_remove_plug(q);

@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
	 * Only recurse once to avoid overrunning the stack, let the unplug
	 * handling reinvoke the handler shortly if we already got there.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,

	drive_stat_acct(rq, 1);
	__elv_add_request(q, rq, where, 0);
	__blk_run_queue(q);
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_delayed_work(struct request_queue *q,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(kblockd_workqueue, dwork, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

int __init blk_dev_init(void)
{
	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+5 −3
Original line number Diff line number Diff line
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,

	/*
	 * Moving a request silently to empty queue_head may stall the
	 * queue.  Kick the queue in those cases.
	 * queue.  Kick the queue in those cases.  This function is called
	 * from request completion path and calling directly into
	 * request_fn may confuse the driver.  Always use kblockd.
	 */
	if (was_empty && next_rq)
		__blk_run_queue(q);
		__blk_run_queue(q, true);
}

static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
		BUG();
	}

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
	return rq;
}

+1 −1
Original line number Diff line number Diff line
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
}

/**
 * blkdev_issue_zeroout generate number of zero filed write bios
 * blkdev_issue_zeroout - generate number of zero filed write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
+18 −11
Original line number Diff line number Diff line
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
				unsigned long delay);

struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td->queue, 0);
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td->queue,
				(st->min_disptime - jiffies));
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}

static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
}

/* Call with queue lock held */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{

	struct throtl_data *td = q->td;
	struct delayed_work *dwork = &td->throtl_work;

	if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		kblockd_schedule_delayed_work(q, dwork, delay);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
EXPORT_SYMBOL(throtl_schedule_delayed_work);

static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
	smp_mb__after_atomic_inc();

	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td->queue, 0);
	throtl_schedule_delayed_work(td, 0);
}

static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
	throtl_schedule_delayed_work(td, 0);
}

static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
	throtl_schedule_delayed_work(td, 0);
}

static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
	smp_mb__before_atomic_inc();
	atomic_inc(&td->limits_changed);
	smp_mb__after_atomic_inc();
	throtl_schedule_delayed_work(td->queue, 0);
	throtl_schedule_delayed_work(td, 0);
}

void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)

static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}
+3 −3
Original line number Diff line number Diff line
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
				__blk_run_queue(cfqd->queue, false);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
		__blk_run_queue(cfqd->queue, false);
	}
}

@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	__blk_run_queue(cfqd->queue, false);
	spin_unlock_irq(q->queue_lock);
}

Loading