
Commit d99cf9d6 authored by Linus Torvalds

Merge branch 'post-2.6.15' of git://brick.kernel.dk/data/git/linux-2.6-block



Manual fixup for merge with Jens' "Suspend support for libata", commit
ID 9b847548.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parents 7ed40918 e650c305
+2 −8
@@ -263,14 +263,8 @@ A flag in the bio structure, BIO_BARRIER is used to identify a barrier i/o.
 The generic i/o scheduler would make sure that it places the barrier request and
 all other requests coming after it after all the previous requests in the
 queue. Barriers may be implemented in different ways depending on the
-driver. A SCSI driver for example could make use of ordered tags to
-preserve the necessary ordering with a lower impact on throughput. For IDE
-this might be two sync cache flush: a pre and post flush when encountering
-a barrier write.
-
-There is a provision for queues to indicate what kind of barriers they
-can provide. This is as of yet unmerged, details will be added here once it
-is in the kernel.
+driver. For more details regarding I/O barriers, please read barrier.txt
+in this directory.
 
 1.2.2 Request Priority/Latency
 
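The documentation change above keeps only the ordering contract: everything queued before a barrier must reach the device before it, and nothing queued after it may be moved ahead of it. As a rough illustration only (plain user-space C; struct toy_req, toy_schedule and by_sector are invented for this sketch, not kernel code), a sector-sorting scheduler can honour that contract by never sorting across a barrier, treating each barrier as the end of the current batch:

/*
 * Illustrative only: requests may be re-sorted by sector for throughput,
 * but never across a barrier; the barrier stays behind everything queued
 * before it and ahead of everything queued after it.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_req {
	unsigned long sector;
	int barrier;		/* models a BIO_BARRIER-style flag */
};

static int by_sector(const void *a, const void *b)
{
	const struct toy_req *x = a, *y = b;
	return (x->sector > y->sector) - (x->sector < y->sector);
}

/* Sort each run of ordinary requests, leaving barriers as fences. */
static void toy_schedule(struct toy_req *q, size_t n)
{
	size_t start = 0, i;

	for (i = 0; i <= n; i++) {
		if (i == n || q[i].barrier) {
			qsort(q + start, i - start, sizeof(*q), by_sector);
			start = i + 1;	/* next batch begins after the barrier */
		}
	}
}

int main(void)
{
	struct toy_req q[] = {
		{ 90, 0 }, { 10, 0 }, { 50, 1 },	/* barrier at sector 50 */
		{ 30, 0 }, { 20, 0 },
	};
	size_t i, n = sizeof(q) / sizeof(q[0]);

	toy_schedule(q, n);
	for (i = 0; i < n; i++)
		printf("%lu%s\n", q[i].sector, q[i].barrier ? " (barrier)" : "");
	return 0;
}

Run, it prints 10, 90, 50 (barrier), 20, 30: each batch is sorted by sector, but nothing crosses the barrier.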
+25 −119
@@ -182,6 +182,9 @@ struct as_rq {
 
 static kmem_cache_t *arq_pool;
 
+static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
+static void as_antic_stop(struct as_data *ad);
+
 /*
  * IO Context helper functions
  */
@@ -370,7 +373,7 @@ static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
  * existing request against the same sector), which can happen when using
  * direct IO, then return the alias.
  */
-static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
 	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
 	struct rb_node *parent = NULL;
@@ -397,6 +400,16 @@ static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
 	return NULL;
 }
 
+static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+	struct as_rq *alias;
+
+	while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
+		as_move_to_dispatch(ad, alias);
+		as_antic_stop(ad);
+	}
+}
+
 static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
 {
 	if (!ON_RB(&arq->rb_node)) {
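For readers following the as-iosched changes: the old as_add_arq_rb() returned any alias (a queued request with the same sector key) to its caller, which then chained the new request behind it; the wrapper added above instead dispatches the colliding entry immediately and retries the insert. Below is a minimal user-space model of that retry-on-collision idea, with a flat array standing in for the sector rbtree; toy_insert, toy_remove, toy_add and struct toy_req are invented names, not kernel APIs:

/*
 * Illustrative only: "insert; if a same-key alias is already present,
 * move the old one to the dispatch list and try again".
 */
#include <stdio.h>

#define MAX_REQS 16

struct toy_req { unsigned long sector; };

static struct toy_req *tree[MAX_REQS];		/* stands in for the sector rbtree */
static struct toy_req *dispatch[MAX_REQS];
static int ndispatch;

/* Return the already-queued entry with the same key (the "alias"), or NULL. */
static struct toy_req *toy_insert(struct toy_req *rq)
{
	int i;

	for (i = 0; i < MAX_REQS; i++)
		if (tree[i] && tree[i]->sector == rq->sector)
			return tree[i];

	for (i = 0; i < MAX_REQS; i++)
		if (!tree[i]) {
			tree[i] = rq;
			break;
		}
	return NULL;
}

static void toy_remove(struct toy_req *rq)
{
	int i;

	for (i = 0; i < MAX_REQS; i++)
		if (tree[i] == rq)
			tree[i] = NULL;
}

/* Mirror of the new wrapper: dispatch the alias, then retry the insert. */
static void toy_add(struct toy_req *rq)
{
	struct toy_req *alias;

	while ((alias = toy_insert(rq)) != NULL) {
		toy_remove(alias);
		dispatch[ndispatch++] = alias;	/* as_move_to_dispatch() analogue */
	}
}

int main(void)
{
	struct toy_req a = { 100 }, b = { 100 }, c = { 200 };
	int i;

	toy_add(&a);
	toy_add(&b);	/* aliases a: a is dispatched, b takes its place */
	toy_add(&c);

	for (i = 0; i < ndispatch; i++)
		printf("dispatched sector %lu\n", dispatch[i]->sector);
	return 0;
}

Running it prints "dispatched sector 100": the first request at that sector is pushed out of the tree as soon as a second one aliases it, which is the property the kernel loop relies on.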
@@ -1133,23 +1146,6 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
 	/*
 	 * take it off the sort and fifo list, add to dispatch queue
 	 */
-	while (!list_empty(&rq->queuelist)) {
-		struct request *__rq = list_entry_rq(rq->queuelist.next);
-		struct as_rq *__arq = RQ_DATA(__rq);
-
-		list_del(&__rq->queuelist);
-
-		elv_dispatch_add_tail(ad->q, __rq);
-
-		if (__arq->io_context && __arq->io_context->aic)
-			atomic_inc(&__arq->io_context->aic->nr_dispatched);
-
-		WARN_ON(__arq->state != AS_RQ_QUEUED);
-		__arq->state = AS_RQ_DISPATCHED;
-
-		ad->nr_dispatched++;
-	}
-
 	as_remove_queued_request(ad->q, rq);
 	WARN_ON(arq->state != AS_RQ_QUEUED);
 
@@ -1325,42 +1321,6 @@ static int as_dispatch_request(request_queue_t *q, int force)
 	return 1;
 }
 
-/*
- * Add arq to a list behind alias
- */
-static inline void
-as_add_aliased_request(struct as_data *ad, struct as_rq *arq,
-				struct as_rq *alias)
-{
-	struct request  *req = arq->request;
-	struct list_head *insert = alias->request->queuelist.prev;
-
-	/*
-	 * Transfer list of aliases
-	 */
-	while (!list_empty(&req->queuelist)) {
-		struct request *__rq = list_entry_rq(req->queuelist.next);
-		struct as_rq *__arq = RQ_DATA(__rq);
-
-		list_move_tail(&__rq->queuelist, &alias->request->queuelist);
-
-		WARN_ON(__arq->state != AS_RQ_QUEUED);
-	}
-
-	/*
-	 * Another request with the same start sector on the rbtree.
-	 * Link this request to that sector. They are untangled in
-	 * as_move_to_dispatch
-	 */
-	list_add(&arq->request->queuelist, insert);
-
-	/*
-	 * Don't want to have to handle merges.
-	 */
-	as_del_arq_hash(arq);
-	arq->request->flags |= REQ_NOMERGE;
-}
-
 /*
  * add arq to rbtree and fifo
  */
@@ -1368,7 +1328,6 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 {
 	struct as_data *ad = q->elevator->elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
-	struct as_rq *alias;
 	int data_dir;
 
 	arq->state = AS_RQ_NEW;
@@ -1387,33 +1346,17 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 		atomic_inc(&arq->io_context->aic->nr_queued);
 	}
 
-	alias = as_add_arq_rb(ad, arq);
-	if (!alias) {
+	as_add_arq_rb(ad, arq);
+	if (rq_mergeable(arq->request))
+		as_add_arq_hash(ad, arq);
+
 	/*
 	 * set expire time (only used for reads) and add to fifo list
 	 */
 	arq->expires = jiffies + ad->fifo_expire[data_dir];
 	list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
 
-		if (rq_mergeable(arq->request))
-			as_add_arq_hash(ad, arq);
 	as_update_arq(ad, arq); /* keep state machine up to date */
-
-	} else {
-		as_add_aliased_request(ad, arq, alias);
-
-		/*
-		 * have we been anticipating this request?
-		 * or does it come from the same process as the one we are
-		 * anticipating for?
-		 */
-		if (ad->antic_status == ANTIC_WAIT_REQ
-				|| ad->antic_status == ANTIC_WAIT_NEXT) {
-			if (as_can_break_anticipation(ad, arq))
-				as_antic_stop(ad);
-		}
-	}
-
 	arq->state = AS_RQ_QUEUED;
 }
 
@@ -1536,23 +1479,8 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 	 * if the merge was a front merge, we need to reposition request
 	 */
 	if (rq_rb_key(req) != arq->rb_key) {
-		struct as_rq *alias, *next_arq = NULL;
-
-		if (ad->next_arq[arq->is_sync] == arq)
-			next_arq = as_find_next_arq(ad, arq);
-
-		/*
-		 * Note! We should really be moving any old aliased requests
-		 * off this request and try to insert them into the rbtree. We
-		 * currently don't bother. Ditto the next function.
-		 */
 		as_del_arq_rb(ad, arq);
-		if ((alias = as_add_arq_rb(ad, arq))) {
-			list_del_init(&arq->fifo);
-			as_add_aliased_request(ad, arq, alias);
-			if (next_arq)
-				ad->next_arq[arq->is_sync] = next_arq;
-		}
+		as_add_arq_rb(ad, arq);
 		/*
 		 * Note! At this stage of this and the next function, our next
 		 * request may not be optimal - eg the request may have "grown"
@@ -1579,18 +1507,8 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 	as_add_arq_hash(ad, arq);
 
 	if (rq_rb_key(req) != arq->rb_key) {
-		struct as_rq *alias, *next_arq = NULL;
-
-		if (ad->next_arq[arq->is_sync] == arq)
-			next_arq = as_find_next_arq(ad, arq);
-
 		as_del_arq_rb(ad, arq);
-		if ((alias = as_add_arq_rb(ad, arq))) {
-			list_del_init(&arq->fifo);
-			as_add_aliased_request(ad, arq, alias);
-			if (next_arq)
-				ad->next_arq[arq->is_sync] = next_arq;
-		}
+		as_add_arq_rb(ad, arq);
 	}
 
 	/*
@@ -1609,18 +1527,6 @@ static void as_merged_requests(request_queue_t *q, struct request *req,
 		}
 	}
 
-	/*
-	 * Transfer list of aliases
-	 */
-	while (!list_empty(&next->queuelist)) {
-		struct request *__rq = list_entry_rq(next->queuelist.next);
-		struct as_rq *__arq = RQ_DATA(__rq);
-
-		list_move_tail(&__rq->queuelist, &req->queuelist);
-
-		WARN_ON(__arq->state != AS_RQ_QUEUED);
-	}
-
 	/*
 	 * kill knowledge of next, this one is a goner
 	 */
+8 −8
@@ -25,15 +25,15 @@
 /*
  * tunables
  */
-static int cfq_quantum = 4;		/* max queue in one round of service */
-static int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
-static int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
-static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
+static const int cfq_quantum = 4;		/* max queue in one round of service */
+static const int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
+static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
 
-static int cfq_slice_sync = HZ / 10;
+static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
-static int cfq_slice_async_rq = 2;
+static const int cfq_slice_async_rq = 2;
 static int cfq_slice_idle = HZ / 100;
 
 #define CFQ_IDLE_GRACE		(HZ / 10)
@@ -45,7 +45,7 @@ static int cfq_slice_idle = HZ / 100;
 /*
  * disable queueing at the driver/hardware level
  */
-static int cfq_max_depth = 2;
+static const int cfq_max_depth = 2;
 
 /*
  * for the hash of cfqq inside the cfqd
+4 −4
@@ -19,10 +19,10 @@
 /*
  * See Documentation/block/deadline-iosched.txt
  */
-static int read_expire = HZ / 2;  /* max time before a read is submitted. */
-static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static int writes_starved = 2;    /* max times reads can starve a write */
-static int fifo_batch = 16;       /* # of sequential requests treated as one
+static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2;    /* max times reads can starve a write */
+static const int fifo_batch = 16;       /* # of sequential requests treated as one
 				     by the above parameters. For throughput. */
 
 static const int deadline_hash_shift = 5;
+59 −27
@@ -304,15 +304,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	/*
-	 * if this is the flush, requeue the original instead and drop the flush
-	 */
-	if (rq->flags & REQ_BAR_FLUSH) {
-		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
-		rq = rq->end_io_data;
-	}
-
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -332,7 +324,18 @@ static void elv_drain_elevator(request_queue_t *q)
 void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
+	struct list_head *pos;
+	unsigned ordseq;
+
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
 	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		q->ordcolor ^= 1;
+
 		/*
 		 * barriers implicitly indicate back insertion
 		 */
@@ -393,6 +396,30 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		q->elevator->ops->elevator_add_req_fn(q, rq);
 		break;
 
+	case ELEVATOR_INSERT_REQUEUE:
+		/*
+		 * If ordered flush isn't in progress, we do front
+		 * insertion; otherwise, requests should be requeued
+		 * in ordseq order.
+		 */
+		rq->flags |= REQ_SOFTBARRIER;
+
+		if (q->ordseq == 0) {
+			list_add(&rq->queuelist, &q->queue_head);
+			break;
+		}
+
+		ordseq = blk_ordered_req_seq(rq);
+
+		list_for_each(pos, &q->queue_head) {
+			struct request *pos_rq = list_entry_rq(pos);
+			if (ordseq <= blk_ordered_req_seq(pos_rq))
+				break;
+		}
+
+		list_add_tail(&rq->queuelist, pos);
+		break;
+
 	default:
 		printk(KERN_ERR "%s: bad insertion point %d\n",
 		       __FUNCTION__, where);
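The ELEVATOR_INSERT_REQUEUE case added above walks q->queue_head and inserts the requeued request in front of the first entry whose ordered-sequence number is greater than or equal to its own, so requeues during a barrier sequence keep the queue sorted by sequence. A reduced user-space model of that insertion rule follows (an array instead of a list_head; requeue_in_order, struct toy_req and the sequence names are made up for the example):

/*
 * Illustrative only: requeued requests go in front of the first queued
 * request whose sequence number is >= their own, keeping the queue
 * sorted by sequence.
 */
#include <stdio.h>

#define MAX_Q 16

struct toy_req { int seq; const char *name; };

static struct toy_req queue[MAX_Q];
static int qlen;

static void requeue_in_order(struct toy_req rq)
{
	int pos, i;

	/* find the first request that should come after rq */
	for (pos = 0; pos < qlen; pos++)
		if (rq.seq <= queue[pos].seq)
			break;

	/* shift the tail right and drop rq into the hole */
	for (i = qlen; i > pos; i--)
		queue[i] = queue[i - 1];
	queue[pos] = rq;
	qlen++;
}

int main(void)
{
	struct toy_req preflush  = { 1, "preflush"  };
	struct toy_req bar       = { 2, "barrier"   };
	struct toy_req postflush = { 3, "postflush" };
	int i;

	/* requeue in scrambled completion order */
	requeue_in_order(postflush);
	requeue_in_order(preflush);
	requeue_in_order(bar);

	for (i = 0; i < qlen; i++)
		printf("%d: %s\n", queue[i].seq, queue[i].name);
	return 0;
}

Whatever order the three pieces are requeued in, they come back out as 1: preflush, 2: barrier, 3: postflush.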
@@ -422,25 +449,16 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 {
 	struct request *rq;
 
-	if (unlikely(list_empty(&q->queue_head) &&
-		     !q->elevator->ops->elevator_dispatch_fn(q, 0)))
-		return NULL;
-
+	while (1) {
+		while (!list_empty(&q->queue_head)) {
 			rq = list_entry_rq(q->queue_head.next);
-
-	/*
-	 * if this is a barrier write and the device has to issue a
-	 * flush sequence to support it, check how far we are
-	 */
-	if (blk_fs_request(rq) && blk_barrier_rq(rq)) {
-		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
-
-		if (q->ordered == QUEUE_ORDERED_FLUSH &&
-		    !blk_barrier_preflush(rq))
-			rq = blk_start_pre_flush(q, rq);
+			if (blk_do_ordered(q, &rq))
+				return rq;
 		}
 
-	return rq;
+		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+			return NULL;
+	}
 }
 
 struct request *elv_next_request(request_queue_t *q)
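The rewritten __elv_next_request() above boils down to one loop: hand out entries from q->queue_head as long as blk_do_ordered() approves them, and only when that list is empty ask the elevator's dispatch_fn for more; if the elevator has nothing either, return NULL. A stripped-down user-space rendering of that control flow is below; toy_do_ordered() and toy_dispatch_more() are stand-ins (the real blk_do_ordered() can also hold a request back or substitute one while a flush sequence runs, which this toy never does):

/*
 * Illustrative only: the shape of the new __elv_next_request() loop.
 */
#include <stdio.h>

#define QMAX 8

static int queue_head[QMAX] = { 1, 2 };	/* pending request ids */
static int qlen = 2;
static int refills = 2;			/* how often the "elevator" can refill */

static int toy_do_ordered(int *rq)
{
	(void)rq;
	return 1;	/* always OK to hand out in this toy */
}

static int toy_dispatch_more(void)
{
	if (refills <= 0)
		return 0;		/* elevator is empty too */
	refills--;
	queue_head[qlen++] = 100 + refills;
	return 1;
}

static int toy_next_request(int *out)
{
	while (1) {
		while (qlen > 0) {
			int rq = queue_head[0];
			int i;

			/* pop the front entry */
			for (i = 1; i < qlen; i++)
				queue_head[i - 1] = queue_head[i];
			qlen--;

			if (toy_do_ordered(&rq)) {
				*out = rq;
				return 1;
			}
			/* the real helper may hold a request back here */
		}
		if (!toy_dispatch_more())
			return 0;	/* nothing queued, elevator empty */
	}
}

int main(void)
{
	int rq;

	while (toy_next_request(&rq))
		printf("next request %d\n", rq);
	return 0;
}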
@@ -498,7 +516,7 @@ struct request *elv_next_request(request_queue_t *q)
 			blkdev_dequeue_request(rq);
 			rq->flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
-			end_that_request_last(rq);
+			end_that_request_last(rq, 0);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
 								ret);
@@ -593,7 +611,21 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
 	if (blk_account_rq(rq)) {
+		struct request *first_rq = list_entry_rq(q->queue_head.next);
+
 		q->in_flight--;
+
+		/*
+		 * Check if the queue is waiting for fs requests to be
+		 * drained for flush sequence.
+		 */
+		if (q->ordseq && q->in_flight == 0 &&
+		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+			q->request_fn(q);
+		}
+
 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
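The completion-side hunk above lets the barrier state machine make progress when the queue was only waiting for ordinary requests to drain: once in_flight reaches zero during the drain phase (and nothing older than the drain step is left at the head of the queue), the ordered sequence is advanced and the queue is kicked. A very reduced user-space model of that last-completion-advances-the-sequence idea follows; the enum values and helpers are invented and the head-of-queue check is omitted:

/*
 * Illustrative only: when the last in-flight request completes while the
 * queue is in its "drain" phase, advance the barrier state machine and
 * run the queue again.
 */
#include <stdio.h>

enum toy_seq { TOY_SEQ_NONE, TOY_SEQ_DRAIN, TOY_SEQ_BAR, TOY_SEQ_DONE };

static int in_flight;
static enum toy_seq ordseq = TOY_SEQ_NONE;

static void toy_run_queue(void)
{
	printf("queue kicked, barrier step can now be issued\n");
}

static void toy_completed_request(void)
{
	in_flight--;

	/*
	 * Only act if a barrier sequence is active, we were draining, and
	 * this was the last outstanding request.
	 */
	if (ordseq == TOY_SEQ_DRAIN && in_flight == 0) {
		ordseq = TOY_SEQ_BAR;	/* drain finished, move on */
		toy_run_queue();
	}
}

int main(void)
{
	int i;

	in_flight = 3;			/* three fs requests already at the driver */
	ordseq = TOY_SEQ_DRAIN;		/* a barrier is waiting for them */

	for (i = 0; i < 3; i++) {
		printf("completion %d (in flight before: %d)\n", i + 1, in_flight);
		toy_completed_request();
	}
	return 0;
}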