Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5cbc39a7 authored by Linus Torvalds
Browse files

Merge branch 'trivial-2.6.23' of git://git.kernel.dk/data/git/linux-2.6-block

* 'trivial-2.6.23' of git://git.kernel.dk/data/git/linux-2.6-block:
  Documentation/block/barrier.txt is not in sync with the actual code: - blk_queue_ordered() no longer has a gfp_mask parameter - blk_queue_ordered_locked() no longer exists - sd_prepare_flush() looks slightly different
  Use list_for_each_entry() instead of list_for_each() in the block device
  Make a "menuconfig" out of the Kconfig objects "menu, ..., endmenu",
  block/Kconfig already has its own "menuconfig" so remove these
  Use menuconfigs instead of menus, so the whole menu can be disabled at once
  cfq-iosched: fix async queue behaviour
  unexport bio_{,un}map_user
  Remove legacy CDROM drivers
  [PATCH] fix request->cmd == INT cases
  cciss: add new controller support for P700m
  [PATCH] Remove acsi.c
  [BLOCK] drop unnecessary bvec rewinding from flush_dry_bio_endio
  [PATCH] cdrom_sysctl_info fix
  blk_hw_contig_segment(): bad segment size checks
  [TRIVIAL PATCH] Kill blk_congestion_wait() stub for !CONFIG_BLOCK
parents 65f88f89 c0613c1c
Loading
Loading
Loading
Loading
+3 −13
Original line number Diff line number Diff line
@@ -82,23 +82,12 @@ including draining and flushing.
typedef void (prepare_flush_fn)(request_queue_t *q, struct request *rq);

int blk_queue_ordered(request_queue_t *q, unsigned ordered,
		      prepare_flush_fn *prepare_flush_fn,
		      unsigned gfp_mask);

int blk_queue_ordered_locked(request_queue_t *q, unsigned ordered,
			     prepare_flush_fn *prepare_flush_fn,
			     unsigned gfp_mask);

The only difference between the two functions is whether or not the
caller is holding q->queue_lock on entry.  The latter expects the
caller is holding the lock.
		      prepare_flush_fn *prepare_flush_fn);

@q			: the queue in question
@ordered		: the ordered mode the driver/device supports
@prepare_flush_fn	: this function should prepare @rq such that it
			  flushes cache to physical medium when executed
@gfp_mask		: gfp_mask used when allocating data structures
			  for ordered processing

For example, SCSI disk driver's prepare_flush_fn looks like the
following.
@@ -106,9 +95,10 @@ following.
static void sd_prepare_flush(request_queue_t *q, struct request *rq)
{
	memset(rq->cmd, 0, sizeof(rq->cmd));
	rq->flags |= REQ_BLOCK_PC;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = SD_TIMEOUT;
	rq->cmd[0] = SYNCHRONIZE_CACHE;
	rq->cmd_len = 10;
}

The following seven ordered modes are supported.  The following table
+2 −2
Original line number Diff line number Diff line
#
# Block layer core configuration
#
config BLOCK
menuconfig BLOCK
       bool "Enable the block layer" if EMBEDDED
       default y
       help
@@ -49,6 +49,6 @@ config LSF

	  If unsure, say Y.

endif
endif # BLOCK

source block/Kconfig.iosched
+36 −3
Original line number Diff line number Diff line
@@ -92,6 +92,8 @@ struct cfq_data {
	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];

	struct timer_list idle_class_timer;

	sector_t last_position;
@@ -1351,8 +1353,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	      gfp_t gfp_mask)
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct task_struct *tsk, gfp_t gfp_mask)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;
@@ -1405,12 +1407,35 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

/*
 * Return a cfq_queue for the given task, taking a reference for the caller.
 *
 * Async (non-sync) requests of the same I/O priority share a single queue,
 * cached in cfqd->async_cfqq[] and indexed by the task's ioprio; sync
 * requests always go through cfq_find_alloc_queue().  The first time an
 * async queue is installed in the cache, an extra reference is taken so the
 * cached pointer stays valid until cfq_exit_queue() prunes it.
 *
 * NOTE(review): assumes task_ioprio(tsk) yields an index < IOPRIO_BE_NR
 * (the async_cfqq[] array bound seen in the struct cfq_data hunk above) —
 * confirm against task_ioprio()'s definition.
 */
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(tsk);
	struct cfq_queue *cfqq = NULL;

	/* async requests reuse the per-priority shared queue, if present */
	if (!is_sync)
		cfqq = cfqd->async_cfqq[ioprio];
	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
		atomic_inc(&cfqq->ref);
		cfqd->async_cfqq[ioprio] = cfqq;
	}

	/* reference for the caller, independent of the cache's pin above */
	atomic_inc(&cfqq->ref);
	return cfqq;
}

/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
@@ -2019,6 +2044,7 @@ static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;
	int i;

	cfq_shutdown_timer_wq(cfqd);

@@ -2035,6 +2061,13 @@ static void cfq_exit_queue(elevator_t *e)
		__cfq_exit_single_io_context(cfqd, cic);
	}

	/*
	 * Put the async queues
	 */
	for (i = 0; i < IOPRIO_BE_NR; i++)
		if (cfqd->async_cfqq[i])	
			cfq_put_queue(cfqd->async_cfqq[i]);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);
+3 −10
Original line number Diff line number Diff line
@@ -112,12 +112,8 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;
	struct list_head *entry;

	list_for_each(entry, &elv_list) {

		e = list_entry(entry, struct elevator_type, list);

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}
@@ -1116,14 +1112,11 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct list_head *entry;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each(entry, &elv_list) {
		struct elevator_type *__e;

		__e = list_entry(entry, struct elevator_type, list);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
+2 −11
Original line number Diff line number Diff line
@@ -527,8 +527,6 @@ int blk_do_ordered(request_queue_t *q, struct request **rqp)
static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
{
	request_queue_t *q = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	/*
	 * This is dry run, restore bio_sector and size.  We'll finish
@@ -540,13 +538,6 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error)
	if (bio->bi_size)
		return 1;

	/* Rewind bvec's */
	bio->bi_idx = 0;
	bio_for_each_segment(bvec, bio, i) {
		bvec->bv_len += bvec->bv_offset;
		bvec->bv_offset = 0;
	}

	/* Reset bio */
	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio->bi_size = q->bi_size;
@@ -1304,9 +1295,9 @@ static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
		return 0;
	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
		return 0;

	return 1;
Loading