
Commit 73aa8682 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Remove the extra check in queue_requests_store
  block, blk-sysfs: Fix an err return path in blk_register_queue()
  block: remove stale kerneldoc member from __blk_run_queue()
  block: get rid of QUEUE_FLAG_REENTER
  cfq-iosched: read_lock() does not always imply rcu_read_lock()
  block: kill blk_flush_plug_list() export
parents 2f666bcf 60735b63
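
The central change in this pull: __blk_run_queue() no longer protects itself against recursion via QUEUE_FLAG_REENTER and simply invokes q->request_fn(), while blk_run_queue_async() is exported so that callers which might already be on the request_fn stack can defer the run to kblockd instead. A minimal, hypothetical driver helper sketching that calling convention (only __blk_run_queue() and blk_run_queue_async() below come from this merge; the helper itself is illustrative):

#include <linux/blkdev.h>

/*
 * Illustrative sketch, not part of this commit: a path that may already
 * be executing inside q->request_fn() can no longer rely on the removed
 * QUEUE_FLAG_REENTER guard in __blk_run_queue(), so it kicks the queue
 * asynchronously; blk_run_queue_async() schedules q->delay_work on the
 * kblockd workqueue with zero delay.
 */
static void mydrv_kick_queue(struct request_queue *q)
{
	blk_run_queue_async(q);
}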
block/blk-core.c  +2 −11
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
- * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
block/blk-sysfs.c  +5 −3
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
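
The check removed above was dead logic: the else-if branch is reached only when rl->count[...] >= q->nr_requests is false, and an integer count strictly below nr_requests always satisfies count + 1 <= nr_requests, so a plain else behaves identically. A standalone userspace sketch (not kernel code) of that equivalence:

#include <assert.h>

/* Whenever the "queue full" test fails, the condition of the removed
 * "else if" necessarily holds, so replacing it with a bare else does
 * not change which branch runs. */
static void queue_full_branches(unsigned long count, unsigned long nr_requests)
{
	if (count >= nr_requests) {
		/* blk_set_queue_full() side */
	} else {
		assert(count + 1 <= nr_requests);	/* always passes here */
		/* blk_clear_queue_full() + wake_up() side */
	}
}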
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
 		return ret;
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
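The blk_register_queue() fix is the usual unwind-on-error rule: the blk_trace sysfs entries created earlier in the function must be torn down again when kobject_add() fails, otherwise they are left behind on the error path. A generic sketch of the pattern with placeholder names (setup_trace_sysfs() and friends are not block-layer functions):

struct device;
int setup_trace_sysfs(struct device *dev);
int add_queue_kobject(struct device *dev);
void remove_trace_sysfs(struct device *dev);

/*
 * Illustrative only: every setup step that succeeded before the failing
 * step is undone on the error path, mirroring the fix above.
 */
static int register_thing(struct device *dev)
{
	int ret;

	ret = setup_trace_sysfs(dev);		/* cf. blk_trace_init_sysfs() */
	if (ret)
		return ret;

	ret = add_queue_kobject(dev);		/* cf. kobject_add() */
	if (ret < 0) {
		remove_trace_sysfs(dev);	/* cf. blk_trace_remove_sysfs() */
		return ret;
	}

	return 0;
}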
block/blk.h  +0 −1
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
block/cfq-iosched.c  +6 −14
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
+call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
+	rcu_read_lock();
+
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
-}
-
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
+
 	rcu_read_unlock();
 }
 
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
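
The cfq change drops the assumption baked into the old __call_for_each_cic() comment ("Must always be called with the rcu_read_lock() held"): as the shortlog title says, a caller holding read_lock() is not necessarily inside an RCU read-side critical section, so the list walker now takes rcu_read_lock() itself, the split helper disappears, and cfq_free_io_context() is switched to the surviving name. A self-contained sketch of the same pattern on a hypothetical list (not cfq code):

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	struct hlist_node node;
	int value;
};

/*
 * Hypothetical walker following the pattern of the merged
 * call_for_each_cic(): it enters its own RCU read-side critical section
 * instead of assuming the caller already did, so it stays correct even
 * when the caller only holds an rwlock's read side.
 */
static void for_each_item(struct hlist_head *head, void (*fn)(struct item *))
{
	struct item *it;
	struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, n, head, node)
		fn(it);
	rcu_read_unlock();
}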
drivers/scsi/scsi_lib.c  +1 −16
@@ -411,8 +411,6 @@ static void scsi_run_queue(struct request_queue *q)
 	list_splice_init(&shost->starved_list, &starved_list);
 
 	while (!list_empty(&starved_list)) {
-		int flagset;
-
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -435,20 +433,7 @@ static void scsi_run_queue(struct request_queue *q)
 			continue;
 		}
 
-		spin_unlock(shost->host_lock);
-
-		spin_lock(sdev->request_queue->queue_lock);
-		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-				!test_bit(QUEUE_FLAG_REENTER,
-					&sdev->request_queue->queue_flags);
-		if (flagset)
-			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue);
-		if (flagset)
-			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
-		spin_unlock(sdev->request_queue->queue_lock);
-
-		spin_lock(shost->host_lock);
+		blk_run_queue_async(sdev->request_queue);
 	}
 	/* put any unprocessed entries back */
 	list_splice(&starved_list, &shost->starved_list);