Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d4928196 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'cfq-ioc-share' of git://git.kernel.dk/linux-2.6-block

* 'cfq-ioc-share' of git://git.kernel.dk/linux-2.6-block:
  cfq-iosched: kill some big inlines
  cfq-iosched: relax IOPRIO_CLASS_IDLE restrictions
  kernel: add CLONE_IO to specifically request sharing of IO contexts
  io_context sharing - anticipatory changes
  block: cfq: make the io context sharing lockless
  io_context sharing - cfq changes
  io context sharing: preliminary support
  ioprio: move io priority from task_struct to io_context
parents bb04af0e febffd61
Loading
Loading
Loading
Loading
+28 −6
Original line number Original line Diff line number Diff line
@@ -170,9 +170,11 @@ static void free_as_io_context(struct as_io_context *aic)


static void as_trim(struct io_context *ioc)
static void as_trim(struct io_context *ioc)
{
{
	spin_lock(&ioc->lock);
	if (ioc->aic)
	if (ioc->aic)
		free_as_io_context(ioc->aic);
		free_as_io_context(ioc->aic);
	ioc->aic = NULL;
	ioc->aic = NULL;
	spin_unlock(&ioc->lock);
}
}


/* Called when the task exits */
/* Called when the task exits */
@@ -462,7 +464,9 @@ static void as_antic_timeout(unsigned long data)
	spin_lock_irqsave(q->queue_lock, flags);
	spin_lock_irqsave(q->queue_lock, flags);
	if (ad->antic_status == ANTIC_WAIT_REQ
	if (ad->antic_status == ANTIC_WAIT_REQ
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
			|| ad->antic_status == ANTIC_WAIT_NEXT) {
		struct as_io_context *aic = ad->io_context->aic;
		struct as_io_context *aic;
		spin_lock(&ad->io_context->lock);
		aic = ad->io_context->aic;


		ad->antic_status = ANTIC_FINISHED;
		ad->antic_status = ANTIC_FINISHED;
		kblockd_schedule_work(&ad->antic_work);
		kblockd_schedule_work(&ad->antic_work);
@@ -475,6 +479,7 @@ static void as_antic_timeout(unsigned long data)
			/* process not "saved" by a cooperating request */
			/* process not "saved" by a cooperating request */
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
			ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
		}
		}
		spin_unlock(&ad->io_context->lock);
	}
	}
	spin_unlock_irqrestore(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
}
@@ -635,9 +640,11 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)


	ioc = ad->io_context;
	ioc = ad->io_context;
	BUG_ON(!ioc);
	BUG_ON(!ioc);
	spin_lock(&ioc->lock);


	if (rq && ioc == RQ_IOC(rq)) {
	if (rq && ioc == RQ_IOC(rq)) {
		/* request from same process */
		/* request from same process */
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}


@@ -646,20 +653,25 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
		 * In this situation status should really be FINISHED,
		 * In this situation status should really be FINISHED,
		 * however the timer hasn't had the chance to run yet.
		 * however the timer hasn't had the chance to run yet.
		 */
		 */
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}


	aic = ioc->aic;
	aic = ioc->aic;
	if (!aic)
	if (!aic) {
		spin_unlock(&ioc->lock);
		return 0;
		return 0;
	}


	if (atomic_read(&aic->nr_queued) > 0) {
	if (atomic_read(&aic->nr_queued) > 0) {
		/* process has more requests queued */
		/* process has more requests queued */
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}


	if (atomic_read(&aic->nr_dispatched) > 0) {
	if (atomic_read(&aic->nr_dispatched) > 0) {
		/* process has more requests dispatched */
		/* process has more requests dispatched */
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}


@@ -680,6 +692,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
		}
		}


		as_update_iohist(ad, aic, rq);
		as_update_iohist(ad, aic, rq);
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}


@@ -688,20 +701,27 @@ static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
		if (aic->ttime_samples == 0)
		if (aic->ttime_samples == 0)
			ad->exit_prob = (7*ad->exit_prob + 256)/8;
			ad->exit_prob = (7*ad->exit_prob + 256)/8;


		if (ad->exit_no_coop > 128)
		if (ad->exit_no_coop > 128) {
			spin_unlock(&ioc->lock);
			return 1;
			return 1;
		}
		}
	}


	if (aic->ttime_samples == 0) {
	if (aic->ttime_samples == 0) {
		if (ad->new_ttime_mean > ad->antic_expire)
		if (ad->new_ttime_mean > ad->antic_expire) {
			spin_unlock(&ioc->lock);
			return 1;
			return 1;
		if (ad->exit_prob * ad->exit_no_coop > 128*256)
		}
		if (ad->exit_prob * ad->exit_no_coop > 128*256) {
			spin_unlock(&ioc->lock);
			return 1;
			return 1;
		}
	} else if (aic->ttime_mean > ad->antic_expire) {
	} else if (aic->ttime_mean > ad->antic_expire) {
		/* the process thinks too much between requests */
		/* the process thinks too much between requests */
		spin_unlock(&ioc->lock);
		return 1;
		return 1;
	}
	}

	spin_unlock(&ioc->lock);
	return 0;
	return 0;
}
}


@@ -1255,7 +1275,9 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
			 * Don't copy here but swap, because when anext is
			 * Don't copy here but swap, because when anext is
			 * removed below, it must contain the unused context
			 * removed below, it must contain the unused context
			 */
			 */
			double_spin_lock(&rioc->lock, &nioc->lock, rioc < nioc);
			swap_io_context(&rioc, &nioc);
			swap_io_context(&rioc, &nioc);
			double_spin_unlock(&rioc->lock, &nioc->lock, rioc < nioc);
		}
		}
	}
	}


+206 −219
Original line number Original line Diff line number Diff line
@@ -26,9 +26,9 @@ static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_slice_idle = HZ / 125;


/*
/*
 * grace period before allowing idle class to get disk access
 * offset from end of service tree
 */
 */
#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_IDLE_DELAY		(HZ / 5)


/*
/*
 * below this threshold, we consider thinktime immediate
 * below this threshold, we consider thinktime immediate
@@ -98,8 +98,6 @@ struct cfq_data {
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;
	struct cfq_queue *async_idle_cfqq;


	struct timer_list idle_class_timer;

	sector_t last_position;
	sector_t last_position;
	unsigned long last_end_request;
	unsigned long last_end_request;


@@ -199,8 +197,8 @@ CFQ_CFQQ_FNS(sync);


static void cfq_dispatch_insert(struct request_queue *, struct request *);
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct task_struct *, gfp_t);
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *,
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);
						struct io_context *);


static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
@@ -384,12 +382,15 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
/*
/*
 * The below is leftmost cache rbtree addon
 * The below is leftmost cache rbtree addon
 */
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
{
	if (!root->left)
	if (!root->left)
		root->left = rb_first(&root->rb);
		root->left = rb_first(&root->rb);


	return root->left;
	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}
}


static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
@@ -446,12 +447,20 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
static void cfq_service_tree_add(struct cfq_data *cfqd,
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
				    struct cfq_queue *cfqq, int add_front)
{
{
	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
	struct rb_node **p, *parent;
	struct rb_node *parent = NULL;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	unsigned long rb_key;
	int left;
	int left;


	if (!add_front) {
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
		cfqq->slice_resid = 0;
@@ -469,8 +478,9 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
	}
	}


	left = 1;
	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
	while (*p) {
		struct cfq_queue *__cfqq;
		struct rb_node **n;
		struct rb_node **n;


		parent = *p;
		parent = *p;
@@ -524,8 +534,7 @@ static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 * add to busy list of queues for service, trying to be fair in ordering
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 * the pending list according to last request service
 */
 */
static inline void
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfq_mark_cfqq_on_rr(cfqq);
@@ -538,8 +547,7 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 * Called when the cfqq no longer has requests pending, remove it from
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 * the service tree.
 */
 */
static inline void
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
	cfq_clear_cfqq_on_rr(cfqq);
@@ -554,7 +562,7 @@ cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
/*
/*
 * rb tree support functions
 * rb tree support functions
 */
 */
static inline void cfq_del_rq_rb(struct request *rq)
static void cfq_del_rq_rb(struct request *rq)
{
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_data *cfqd = cfqq->cfqd;
@@ -594,8 +602,7 @@ static void cfq_add_rq_rb(struct request *rq)
	BUG_ON(!cfqq->next_rq);
	BUG_ON(!cfqq->next_rq);
}
}


static inline void
static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
{
	elv_rb_del(&cfqq->sort_list, rq);
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfqq->queued[rq_is_sync(rq)]--;
@@ -609,7 +616,7 @@ cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
	struct cfq_io_context *cic;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;
	struct cfq_queue *cfqq;


	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
	if (!cic)
		return NULL;
		return NULL;


@@ -721,7 +728,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 * merge only if rq is queued there.
	 */
	 */
	cic = cfq_cic_rb_lookup(cfqd, current->io_context);
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
	if (!cic)
		return 0;
		return 0;


@@ -732,15 +739,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
	return 0;
	return 0;
}
}


static inline void
static void __cfq_set_active_queue(struct cfq_data *cfqd,
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
				   struct cfq_queue *cfqq)
{
{
	if (cfqq) {
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
@@ -789,47 +791,16 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}
}


static int start_idle_class_timer(struct cfq_data *cfqd)
{
	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	unsigned long now = jiffies;

	if (time_before(now, end) &&
	    time_after_eq(now, cfqd->last_end_request)) {
		mod_timer(&cfqd->idle_class_timer, end);
		return 1;
	}

	return 0;
}

/*
/*
 * Get next queue for service. Unless we have a queue preemption,
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 * we'll simply select the first cfqq in the service tree.
 */
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
{
	struct cfq_queue *cfqq;
	struct rb_node *n;

	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;
		return NULL;


	n = cfq_rb_first(&cfqd->service_tree);
	return cfq_rb_first(&cfqd->service_tree);
	cfqq = rb_entry(n, struct cfq_queue, rb_node);

	if (cfq_class_idle(cfqq)) {
		/*
		 * if we have idle queues and no rt or be queues had
		 * pending requests, either allow immediate service if
		 * the grace period has passed or arm the idle grace
		 * timer
		 */
		if (start_idle_class_timer(cfqd))
			cfqq = NULL;
	}

	return cfqq;
}
}


/*
/*
@@ -895,7 +866,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
	 * task has exited, don't wait
	 * task has exited, don't wait
	 */
	 */
	cic = cfqd->active_cic;
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;
		return;


	/*
	/*
@@ -939,7 +910,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
/*
/*
 * return expired entry, or NULL to just start from scratch in rbtree
 * return expired entry, or NULL to just start from scratch in rbtree
 */
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	struct request *rq;
@@ -1068,7 +1039,7 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	return dispatched;
	return dispatched;
}
}


static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
{
	int dispatched = 0;
	int dispatched = 0;


@@ -1087,14 +1058,11 @@ static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 */
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
{
	struct cfq_queue *cfqq;
	int dispatched = 0;
	int dispatched = 0;
	struct rb_node *n;

	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);


	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}


	cfq_slice_expired(cfqd, 0);
	cfq_slice_expired(cfqd, 0);


@@ -1170,21 +1138,70 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
	kmem_cache_free(cfq_pool, cfqq);
	kmem_cache_free(cfq_pool, cfqq);
}
}


static void cfq_free_io_context(struct io_context *ioc)
/*
 * Call func for each cic attached to this ioc. Returns number of cic's seen.
 */
#define CIC_GANG_NR	16
static unsigned int
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cics[CIC_GANG_NR];
	unsigned long index = 0;
	unsigned int called = 0;
	int nr;

	rcu_read_lock();

	do {
		int i;

		/*
		 * Perhaps there's a better way - this just gang lookups from
		 * 0 to the end, restarting after each CIC_GANG_NR from the
		 * last key + 1.
		 */
		nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
						index, CIC_GANG_NR);
		if (!nr)
			break;

		called += nr;
		index = 1 + (unsigned long) cics[nr - 1]->key;

		for (i = 0; i < nr; i++)
			func(ioc, cics[i]);
	} while (nr == CIC_GANG_NR);

	rcu_read_unlock();

	return called;
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
{
	struct cfq_io_context *__cic;
	unsigned long flags;
	struct rb_node *n;

	int freed = 0;
	BUG_ON(!cic->dead_key);


	ioc->ioc_data = NULL;
	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	spin_unlock_irqrestore(&ioc->lock, flags);


	while ((n = rb_first(&ioc->cic_root)) != NULL) {
	kmem_cache_free(cfq_ioc_pool, cic);
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
}
}


static void cfq_free_io_context(struct io_context *ioc)
{
	int freed;

	/*
	 * ioc->refcount is zero here, so no more cic's are allowed to be
	 * linked into this ioc. So it should be ok to iterate over the known
	 * list, we will see all cic's since no new ones are added.
	 */
	freed = call_for_each_cic(ioc, cic_free_func);

	elv_ioc_count_mod(ioc_count, -freed);
	elv_ioc_count_mod(ioc_count, -freed);


	if (ioc_gone && !elv_ioc_count_read(ioc_count))
	if (ioc_gone && !elv_ioc_count_read(ioc_count))
@@ -1205,7 +1222,12 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
					 struct cfq_io_context *cic)
{
{
	list_del_init(&cic->queue_list);
	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;
	cic->key = NULL;


	if (cic->cfqq[ASYNC]) {
	if (cic->cfqq[ASYNC]) {
@@ -1219,16 +1241,18 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
	}
	}
}
}


static void cfq_exit_single_io_context(struct cfq_io_context *cic)
static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_data *cfqd = cic->key;


	if (cfqd) {
	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		struct request_queue *q = cfqd->queue;
		unsigned long flags;


		spin_lock_irq(q->queue_lock);
		spin_lock_irqsave(q->queue_lock, flags);
		__cfq_exit_single_io_context(cfqd, cic);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irq(q->queue_lock);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
	}
}
}


@@ -1238,21 +1262,8 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 */
 */
static void cfq_exit_io_context(struct io_context *ioc)
static void cfq_exit_io_context(struct io_context *ioc)
{
{
	struct cfq_io_context *__cic;
	rcu_assign_pointer(ioc->ioc_data, NULL);
	struct rb_node *n;
	call_for_each_cic(ioc, cfq_exit_single_io_context);

	ioc->ioc_data = NULL;

	/*
	 * put the reference this task is holding to the various queues
	 */
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}
}
}


static struct cfq_io_context *
static struct cfq_io_context *
@@ -1273,7 +1284,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
	return cic;
	return cic;
}
}


static void cfq_init_prio_data(struct cfq_queue *cfqq)
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
{
	struct task_struct *tsk = current;
	struct task_struct *tsk = current;
	int ioprio_class;
	int ioprio_class;
@@ -1281,7 +1292,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
	if (!cfq_cfqq_prio_changed(cfqq))
	if (!cfq_cfqq_prio_changed(cfqq))
		return;
		return;


	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	switch (ioprio_class) {
		default:
		default:
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
@@ -1293,11 +1304,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
			break;
		case IOPRIO_CLASS_RT:
		case IOPRIO_CLASS_RT:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio = task_ioprio(ioc);
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			break;
			break;
		case IOPRIO_CLASS_BE:
		case IOPRIO_CLASS_BE:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio = task_ioprio(ioc);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
			break;
		case IOPRIO_CLASS_IDLE:
		case IOPRIO_CLASS_IDLE:
@@ -1316,7 +1327,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
	cfq_clear_cfqq_prio_changed(cfqq);
	cfq_clear_cfqq_prio_changed(cfqq);
}
}


static inline void changed_ioprio(struct cfq_io_context *cic)
static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	struct cfq_queue *cfqq;
@@ -1330,8 +1341,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
	cfqq = cic->cfqq[ASYNC];
	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task,
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
					 GFP_ATOMIC);
		if (new_cfqq) {
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
			cfq_put_queue(cfqq);
@@ -1347,29 +1357,19 @@ static inline void changed_ioprio(struct cfq_io_context *cic)


static void cfq_ioc_set_ioprio(struct io_context *ioc)
static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
{
	struct cfq_io_context *cic;
	call_for_each_cic(ioc, changed_ioprio);
	struct rb_node *n;

	ioc->ioprio_changed = 0;
	ioc->ioprio_changed = 0;

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}
}
}


static struct cfq_queue *
static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
		     struct task_struct *tsk, gfp_t gfp_mask)
		     struct io_context *ioc, gfp_t gfp_mask)
{
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_context *cic;
	struct cfq_io_context *cic;


retry:
retry:
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);
	cfqq = cic_to_cfqq(cic, is_sync);


@@ -1404,15 +1404,16 @@ retry:
		atomic_set(&cfqq->ref, 0);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		cfqq->cfqd = cfqd;


		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq, ioc);

		if (is_sync) {
		if (is_sync) {
			if (!cfq_class_idle(cfqq))
				cfq_mark_cfqq_idle_window(cfqq);
				cfq_mark_cfqq_idle_window(cfqq);
			cfq_mark_cfqq_sync(cfqq);
			cfq_mark_cfqq_sync(cfqq);
		}
		}

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);

		cfq_init_prio_data(cfqq);
	}
	}


	if (new_cfqq)
	if (new_cfqq)
@@ -1439,11 +1440,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}
}


static struct cfq_queue *
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
	      gfp_t gfp_mask)
{
{
	const int ioprio = task_ioprio(tsk);
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(tsk);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;
	struct cfq_queue *cfqq = NULL;


@@ -1453,7 +1454,7 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	}
	}


	if (!cfqq) {
	if (!cfqq) {
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
		if (!cfqq)
		if (!cfqq)
			return NULL;
			return NULL;
	}
	}
@@ -1470,28 +1471,42 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
	return cfqq;
	return cfqq;
}
}


static void cfq_cic_free(struct cfq_io_context *cic)
{
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}

/*
/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
 */
static void
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
		  struct cfq_io_context *cic)
{
{
	unsigned long flags;

	WARN_ON(!list_empty(&cic->queue_list));
	WARN_ON(!list_empty(&cic->queue_list));


	spin_lock_irqsave(&ioc->lock, flags);

	if (ioc->ioc_data == cic)
	if (ioc->ioc_data == cic)
		ioc->ioc_data = NULL;
		rcu_assign_pointer(ioc->ioc_data, NULL);


	rb_erase(&cic->rb_node, &ioc->cic_root);
	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
	kmem_cache_free(cfq_ioc_pool, cic);
	spin_unlock_irqrestore(&ioc->lock, flags);
	elv_ioc_count_dec(ioc_count);

	cfq_cic_free(cic);
}
}


static struct cfq_io_context *
static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;
	void *k;


	if (unlikely(!ioc))
	if (unlikely(!ioc))
		return NULL;
		return NULL;
@@ -1499,75 +1514,65 @@ cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
	/*
	/*
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 * we maintain a last-hit cache, to avoid browsing over the tree
	 */
	 */
	cic = ioc->ioc_data;
	cic = rcu_dereference(ioc->ioc_data);
	if (cic && cic->key == cfqd)
	if (cic && cic->key == cfqd)
		return cic;
		return cic;


restart:
	do {
	n = ioc->cic_root.rb_node;
		rcu_read_lock();
	while (n) {
		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		rcu_read_unlock();
		if (!cic)
			break;
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		k = cic->key;
		if (unlikely(!k)) {
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			cfq_drop_dead_cic(cfqd, ioc, cic);
			goto restart;
			continue;
		}
		}


		if (key < k)
		rcu_assign_pointer(ioc->ioc_data, cic);
			n = n->rb_left;
		break;
		else if (key > k)
	} while (1);
			n = n->rb_right;
		else {
			ioc->ioc_data = cic;
			return cic;
		}
	}


	return NULL;
	return cic;
}
}


static inline void
/*
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
	     struct cfq_io_context *cic)
 * the process specific cfq io context when entered from the block layer.
 * Also adds the cic to a per-cfqd list, used when this queue is removed.
 */
static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
			struct cfq_io_context *cic, gfp_t gfp_mask)
{
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	unsigned long flags;
	unsigned long flags;
	void *k;
	int ret;


	ret = radix_tree_preload(gfp_mask);
	if (!ret) {
		cic->ioc = ioc;
		cic->ioc = ioc;
		cic->key = cfqd;
		cic->key = cfqd;


restart:
		spin_lock_irqsave(&ioc->lock, flags);
	parent = NULL;
		ret = radix_tree_insert(&ioc->radix_root,
	p = &ioc->cic_root.rb_node;
						(unsigned long) cfqd, cic);
	while (*p) {
		spin_unlock_irqrestore(&ioc->lock, flags);
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}


	rb_link_node(&cic->rb_node, parent, p);
		radix_tree_preload_end();
	rb_insert_color(&cic->rb_node, &ioc->cic_root);


		if (!ret) {
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			spin_lock_irqsave(cfqd->queue->queue_lock, flags);
			list_add(&cic->queue_list, &cfqd->cic_list);
			list_add(&cic->queue_list, &cfqd->cic_list);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
			spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
		}
		}
	}

	if (ret)
		printk(KERN_ERR "cfq: cic link failed!\n");

	return ret;
}


/*
/*
 * Setup general io context and cfq io context. There can be several cfq
 * Setup general io context and cfq io context. There can be several cfq
@@ -1586,7 +1591,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
	if (!ioc)
	if (!ioc)
		return NULL;
		return NULL;


	cic = cfq_cic_rb_lookup(cfqd, ioc);
	cic = cfq_cic_lookup(cfqd, ioc);
	if (cic)
	if (cic)
		goto out;
		goto out;


@@ -1594,13 +1599,17 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
	if (cic == NULL)
	if (cic == NULL)
		goto err;
		goto err;


	cfq_cic_link(cfqd, ioc, cic);
	if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
		goto err_free;

out:
out:
	smp_read_barrier_depends();
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);
		cfq_ioc_set_ioprio(ioc);


	return cic;
	return cic;
err_free:
	cfq_cic_free(cic);
err:
err:
	put_io_context(ioc);
	put_io_context(ioc);
	return NULL;
	return NULL;
@@ -1655,12 +1664,15 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
{
{
	int enable_idle;
	int enable_idle;


	if (!cfq_cfqq_sync(cfqq))
	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;
		return;


	enable_idle = cfq_cfqq_idle_window(cfqq);
	enable_idle = cfq_cfqq_idle_window(cfqq);


	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
	else if (sample_valid(cic->ttime_samples)) {
@@ -1793,7 +1805,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_queue *cfqq = RQ_CFQQ(rq);


	cfq_init_prio_data(cfqq);
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);


	cfq_add_rq_rb(rq);
	cfq_add_rq_rb(rq);


@@ -1834,7 +1846,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		}
		if (cfq_slice_used(cfqq))
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
			cfq_arm_slice_timer(cfqd);
@@ -1894,13 +1906,13 @@ static int cfq_may_queue(struct request_queue *q, int rw)
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 * if that fails
	 */
	 */
	cic = cfq_cic_rb_lookup(cfqd, tsk->io_context);
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
	if (!cic)
		return ELV_MQUEUE_MAY;
		return ELV_MQUEUE_MAY;


	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
	if (cfqq) {
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_init_prio_data(cfqq, cic->ioc);
		cfq_prio_boost(cfqq);
		cfq_prio_boost(cfqq);


		return __cfq_may_queue(cfqq);
		return __cfq_may_queue(cfqq);
@@ -1938,7 +1950,6 @@ static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	const int is_sync = rq_is_sync(rq);
@@ -1956,7 +1967,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)


	cfqq = cic_to_cfqq(cic, is_sync);
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq) {
	if (!cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask);
		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);


		if (!cfqq)
		if (!cfqq)
			goto queue_fail;
			goto queue_fail;
@@ -2039,29 +2050,9 @@ out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
}


/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	if (!start_idle_class_timer(cfqd))
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	kblockd_flush_work(&cfqd->unplug_work);
	kblockd_flush_work(&cfqd->unplug_work);
}
}


@@ -2126,10 +2117,6 @@ static void *cfq_init_queue(struct request_queue *q)
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;


	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);


	cfqd->last_end_request = jiffies;
	cfqd->last_end_request = jiffies;
@@ -2160,7 +2147,7 @@ static int __init cfq_slab_setup(void)
	if (!cfq_pool)
	if (!cfq_pool)
		goto fail;
		goto fail;


	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
	cfq_ioc_pool = KMEM_CACHE(cfq_io_context, SLAB_DESTROY_BY_RCU);
	if (!cfq_ioc_pool)
	if (!cfq_ioc_pool)
		goto fail;
		goto fail;


Loading