
Commit 8a83f331 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: add blk_run_queue_async
  block: blk_delay_queue() should use kblockd workqueue
  md: fix up raid1/raid10 unplugging.
  md: incorporate new plugging into raid5.
  md: provide generic support for handling unplug callbacks.
  md - remove old plugging code.
  md/dm - remove remains of plug_fn callback.
  md: use new plugging interface for RAID IO.
  block: drop queue lock before calling __blk_run_queue() for kblockd punt
  Revert "block: add callback function for unplug notification"
  block: Enhance new plugging support to support general callbacks
parents 5d5b1b9f 24ecfbe2
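
The last bullet above, "block: Enhance new plugging support to support general callbacks", is the piece the md changes build on: instead of a single per-queue unplugged_fn, any code running under an on-stack plug can hang a struct blk_plug_cb off current->plug->cb_list and have it invoked when the plug is flushed. A minimal sketch of how a stacked driver might use this, assuming the API as merged here (my_plug_data, my_unplug, and my_check_plugged are illustrative names, not from the patch):

	#include <linux/blkdev.h>
	#include <linux/list.h>
	#include <linux/sched.h>
	#include <linux/slab.h>

	struct my_plug_data {
		struct blk_plug_cb cb;	/* embeds list head + callback */
		/* ... driver state to flush at unplug time ... */
	};

	/* Runs from blk_flush_plug_list(), without the queue lock held. */
	static void my_unplug(struct blk_plug_cb *cb)
	{
		struct my_plug_data *data =
			container_of(cb, struct my_plug_data, cb);

		/* dispatch the IO the driver held back while plugged */
		kfree(data);
	}

	static bool my_check_plugged(struct my_plug_data *data)
	{
		struct blk_plug *plug = current->plug;

		if (!plug)
			return false;	/* not plugged; dispatch immediately */

		data->cb.callback = my_unplug;
		list_add(&data->cb.list, &plug->cb_list);
		return true;
	}

A real caller must also avoid registering the same callback twice per plug; md keeps its own flag for that, which this sketch omits.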
block/blk-core.c  +65 −18
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -220,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+	queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -238,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -307,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -315,6 +315,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
@@ -328,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -977,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1321,7 +1335,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2638,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
 
 	/*
@@ -2670,12 +2685,41 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
  */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
 			    bool from_schedule)
+	__releases(q->queue_lock)
 {
 	trace_block_unplug(q, depth, !from_schedule);
-	__blk_run_queue(q, from_schedule);
 
-	if (q->unplugged_fn)
-		q->unplugged_fn(q);
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		blk_run_queue_async(q);
+	} else {
+		__blk_run_queue(q);
+		spin_unlock(q->queue_lock);
+	}
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+	LIST_HEAD(callbacks);
+
+	if (list_empty(&plug->cb_list))
+		return;
+
+	list_splice_init(&plug->cb_list, &callbacks);
+
+	while (!list_empty(&callbacks)) {
+		struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+		list_del(&cb->list);
+		cb->callback(cb);
+	}
 }
 
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
@@ -2688,6 +2732,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
+	flush_plug_callbacks(plug);
 	if (list_empty(&plug->list))
 		return;
 
@@ -2712,10 +2757,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
 				queue_unplugged(q, depth, from_schedule);
-				spin_unlock(q->queue_lock);
-			}
 			q = rq->q;
 			depth = 0;
 			spin_lock(q->queue_lock);
@@ -2733,10 +2779,11 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		depth++;
 	}
 
-	if (q) {
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
 		queue_unplugged(q, depth, from_schedule);
-		spin_unlock(q->queue_lock);
-	}
 
 	local_irq_restore(flags);
 }
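
Taken together, the blk-core.c hunks above change the caller contract: __blk_run_queue() drops its force_kblockd flag and must always be invoked with queue_lock held, while the former force_kblockd=true cases move to the new blk_run_queue_async(), which schedules the queue's delay_work on kblockd with zero delay. A rough sketch of the two resulting patterns (the helper names are hypothetical, for illustration only):

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	/* Synchronous path: take the queue lock and run the queue inline. */
	static void kick_queue_locked(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		__blk_run_queue(q);	/* no force_kblockd flag anymore */
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	/* Deferred path: replaces __blk_run_queue(q, true); the queue's
	 * delay_work runs on kblockd with zero delay. */
	static void kick_queue_deferred(struct request_queue *q)
	{
		blk_run_queue_async(q);
	}

Note that blk_run_queue_async() is declared in the block layer's private block/blk.h at this point (see the last hunk below), so only block-layer code can call it.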
block/blk-exec.c  +1 −1
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
block/blk-flush.c  +2 −2
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver.  Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
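
Both flush call sites swap __blk_run_queue(q, true) for blk_run_queue_async(q). These functions run from request completion, and, per the comment preserved in the hunk above, re-entering the driver's request_fn from its own completion path may confuse the driver, so dispatch must always be deferred to kblockd. A hedged sketch of the pattern (my_flush_end_io is illustrative, not from the patch, and like blk-flush.c it assumes block-layer-internal access to blk_run_queue_async()):

	#include <linux/blkdev.h>

	/* Completion handler: atomic context, possibly called from deep
	 * inside the driver's own completion path. */
	static void my_flush_end_io(struct request *rq, int error)
	{
		struct request_queue *q = rq->q;

		/* ... per-request completion bookkeeping ... */

		/* Never call q->request_fn() from here; let kblockd do it. */
		blk_run_queue_async(q);
	}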
block/blk-settings.c  +0 −16
@@ -790,22 +790,6 @@ void blk_queue_flush(struct request_queue *q, unsigned int flush)
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush);
 
-/**
- * blk_queue_unplugged - register a callback for an unplug event
- * @q:		the request queue for the device
- * @fn:		the function to call
- *
- * Some stacked drivers may need to know when IO is dispatched on an
- * unplug event. By registrering a callback here, they will be notified
- * when someone flushes their on-stack queue plug. The function will be
- * called with the queue lock held.
- */
-void blk_queue_unplugged(struct request_queue *q, unplugged_fn *fn)
-{
-	q->unplugged_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_unplugged);
-
 static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
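
With this hunk the short-lived notification API is gone entirely, completing the revert listed in the commit message. blk_queue_unplugged() installed one unplugged_fn per request_queue and called it with the queue lock held, which fit badly with both per-plugger state and the lock-dropping that queue_unplugged() now does. Its replacement is the per-plug cb_list flushed by blk_flush_plug_list() without the queue lock; see the registration sketch after the commit message above.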
block/blk.h  +1 −0
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling