
Commit 096a705b authored by Linus Torvalds

Merge branch 'for-3.1/core' of git://git.kernel.dk/linux-block

* 'for-3.1/core' of git://git.kernel.dk/linux-block: (24 commits)
  block: strict rq_affinity
  backing-dev: use synchronize_rcu_expedited instead of synchronize_rcu
  block: fix patch import error in max_discard_sectors check
  block: reorder request_queue to remove 64 bit alignment padding
  CFQ: add think time check for group
  CFQ: add think time check for service tree
  CFQ: move think time check variables to a separate struct
  fixlet: Remove fs_excl from struct task.
  cfq: Remove special treatment for metadata rqs.
  block: document blk_plug list access
  block: avoid building too big plug list
  compat_ioctl: fix make headers_check regression
  block: eliminate potential for infinite loop in blkdev_issue_discard
  compat_ioctl: fix warning caused by qemu
  block: flush MEDIA_CHANGE from drivers on close(2)
  blk-throttle: Make total_nr_queued unsigned
  block: Add __attribute__((format(printf...) and fix fallout
  fs/partitions/check.c: make local symbols static
  block:remove some spare spaces in genhd.c
  block:fix the comment error in blkdev.h
  ...
parents fea80311 5757a6d7
Documentation/block/queue-sysfs.txt +7 −3
@@ -45,9 +45,13 @@ device.
 
 rq_affinity (RW)
 ----------------
-If this option is enabled, the block layer will migrate request completions
-to the CPU that originally submitted the request. For some workloads
-this provides a significant reduction in CPU cycles due to caching effects.
+If this option is '1', the block layer will migrate request completions to the
+cpu "group" that originally submitted the request. For some workloads this
+provides a significant reduction in CPU cycles due to caching effects.
+
+For storage configurations that need to maximize distribution of completion
+processing setting this option to '2' forces the completion to run on the
+requesting cpu (bypassing the "group" aggregation logic).
 
 scheduler (RW)
 --------------
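For context, this tunable is exposed at /sys/block/<dev>/queue/rq_affinity. A minimal userspace sketch of selecting the new strict mode, assuming a device named "sda" (the helper name and device are illustrative, not from this commit):

#include <stdio.h>

/* Hypothetical helper: write an rq_affinity mode for one block device. */
static int set_rq_affinity(const char *disk, int mode)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/%s/queue/rq_affinity", disk);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", mode);	/* 0 = off, 1 = group, 2 = strict (new) */
	return fclose(f);
}

int main(void)
{
	/* Force completions onto the exact submitting CPU. */
	return set_rq_affinity("sda", 2) ? 1 : 0;
}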
block/blk-core.c +7 −4
@@ -1282,10 +1282,8 @@ get_rq:
 	init_request_from_bio(req, bio);
 
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-	    bio_flagged(bio, BIO_CPU_AFFINE)) {
-		req->cpu = blk_cpu_to_group(get_cpu());
-		put_cpu();
-	}
+	    bio_flagged(bio, BIO_CPU_AFFINE))
+		req->cpu = smp_processor_id();
 
 	plug = current->plug;
 	if (plug) {
@@ -1305,7 +1303,10 @@
 				plug->should_sort = 1;
 		}
 		list_add_tail(&req->queuelist, &plug->list);
+		plug->count++;
 		drive_stat_acct(req, 1);
+		if (plug->count >= BLK_MAX_REQUEST_COUNT)
+			blk_flush_plug_list(plug, false);
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
@@ -2629,6 +2630,7 @@ void blk_start_plug(struct blk_plug *plug)
 	INIT_LIST_HEAD(&plug->list);
 	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
+	plug->count = 0;
 
 	/*
 	 * If this is a nested plug, don't actually assign it. It will be
@@ -2712,6 +2714,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		return;
 
 	list_splice_init(&plug->list, &list);
+	plug->count = 0;
 
 	if (plug->should_sort) {
 		list_sort(NULL, &list, plug_rq_cmp);
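The new plug->count field caps how many requests accumulate on a task's plug list before blk_flush_plug_list() drains it early. A minimal sketch of the plugging pattern from a submitter's point of view (the caller and submit path are illustrative; blk_start_plug()/blk_finish_plug() themselves are the real API):

#include <linux/blkdev.h>

/* Sketch: batch several bio submissions under one plug so the block
 * layer can merge and sort them. With the change above, the plug is
 * auto-flushed once plug->count reaches BLK_MAX_REQUEST_COUNT instead
 * of growing without bound. */
static void submit_batch(struct bio **bios, int nr_bios)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr_bios; i++)
		submit_bio(READ, bios[i]);	/* queued on current->plug->list */
	blk_finish_plug(&plug);			/* drains anything still plugged */
}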
block/blk-ioc.c +20 −20
@@ -82,26 +82,26 @@ void exit_io_context(struct task_struct *task)
 
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret;
+	struct io_context *ioc;
 
-	ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
-	if (ret) {
-		atomic_long_set(&ret->refcount, 1);
-		atomic_set(&ret->nr_tasks, 1);
-		spin_lock_init(&ret->lock);
-		ret->ioprio_changed = 0;
-		ret->ioprio = 0;
-		ret->last_waited = 0; /* doesn't matter... */
-		ret->nr_batch_requests = 0; /* because this is 0 */
-		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
-		INIT_HLIST_HEAD(&ret->cic_list);
-		ret->ioc_data = NULL;
+	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
+	if (ioc) {
+		atomic_long_set(&ioc->refcount, 1);
+		atomic_set(&ioc->nr_tasks, 1);
+		spin_lock_init(&ioc->lock);
+		ioc->ioprio_changed = 0;
+		ioc->ioprio = 0;
+		ioc->last_waited = 0; /* doesn't matter... */
+		ioc->nr_batch_requests = 0; /* because this is 0 */
+		INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ioc->cic_list);
+		ioc->ioc_data = NULL;
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-		ret->cgroup_changed = 0;
+		ioc->cgroup_changed = 0;
 #endif
 	}
 
-	return ret;
+	return ioc;
 }
 
 /*
@@ -139,19 +139,19 @@ struct io_context *current_io_context(gfp_t gfp_flags, int node)
  */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
 {
-	struct io_context *ret = NULL;
+	struct io_context *ioc = NULL;
 
 	/*
 	 * Check for unlikely race with exiting task. ioc ref count is
 	 * zero when ioc is being detached.
 	 */
 	do {
-		ret = current_io_context(gfp_flags, node);
-		if (unlikely(!ret))
+		ioc = current_io_context(gfp_flags, node);
+		if (unlikely(!ioc))
 			break;
-	} while (!atomic_long_inc_not_zero(&ret->refcount));
+	} while (!atomic_long_inc_not_zero(&ioc->refcount));
 
-	return ret;
+	return ioc;
 }
 EXPORT_SYMBOL(get_io_context);

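The rename leaves the logic intact: get_io_context() uses the classic inc-not-zero idiom to take a reference only while the object is still live, since a refcount of zero means teardown is in progress. A self-contained userspace sketch of the same pattern using C11 atomics (the struct and function names are illustrative, not kernel API):

#include <stdatomic.h>

struct obj {
	atomic_long refcount;	/* 0 means the object is being torn down */
};

/* Take a reference only if the count has not already dropped to zero;
 * mirrors the atomic_long_inc_not_zero() loop in the hunk above. */
static int get_ref(struct obj *o)
{
	long v = atomic_load(&o->refcount);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
			return 1;	/* got a reference */
	}
	return 0;	/* lost the race with teardown; caller must retry or bail */
}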
block/blk-lib.c +4 −1
@@ -59,7 +59,10 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	 * granularity
 	 */
 	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-	if (q->limits.discard_granularity) {
+	if (unlikely(!max_discard_sectors)) {
+		/* Avoid infinite loop below. Being cautious never hurts. */
+		return -EOPNOTSUPP;
+	} else if (q->limits.discard_granularity) {
 		unsigned int disc_sects = q->limits.discard_granularity >> 9;
 
 		max_discard_sectors &= ~(disc_sects - 1);
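Why a zero cap loops forever: further down, blkdev_issue_discard() splits the range into chunks of at most max_discard_sectors. A stripped-down paraphrase of that splitting loop (illustrative kernel-style code, not the verbatim function; issue_discard_chunk() is a hypothetical stand-in for the bio setup and submission):

/* With max_discard_sectors == 0, nr would be 0 on every pass, nr_sects
 * would never shrink, and the loop would spin forever -- hence the
 * -EOPNOTSUPP bail-out added above. */
static void discard_loop(sector_t sector, sector_t nr_sects,
			 unsigned int max_discard_sectors)
{
	while (nr_sects) {
		sector_t nr = min_t(sector_t, nr_sects, max_discard_sectors);

		issue_discard_chunk(sector, nr);	/* hypothetical helper */
		sector += nr;
		nr_sects -= nr;
	}
}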
block/blk-softirq.c +7 −4
@@ -103,22 +103,25 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
+	int ccpu, cpu, group_cpu = NR_CPUS;
 	struct request_queue *q = req->q;
 	unsigned long flags;
-	int ccpu, cpu, group_cpu;
 
 	BUG_ON(!q->softirq_done_fn);
 
 	local_irq_save(flags);
 	cpu = smp_processor_id();
-	group_cpu = blk_cpu_to_group(cpu);
 
 	/*
 	 * Select completion CPU
 	 */
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
 		ccpu = req->cpu;
-	else
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
+			ccpu = blk_cpu_to_group(ccpu);
+			group_cpu = blk_cpu_to_group(cpu);
+		}
+	} else
 		ccpu = cpu;
 
 	if (ccpu == cpu || ccpu == group_cpu) {
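QUEUE_FLAG_SAME_FORCE is what separates rq_affinity=2 from =1 here: when it is set, the group mapping is skipped and the completion must run on the exact submitting CPU. A sketch of how the sysfs store plausibly maps the written value onto the two flags (the actual store function lives in block/blk-sysfs.c, outside this diff, so treat this mapping as an assumption):

/* Assumed mapping, consistent with the completion logic above; the real
 * code would hold q->queue_lock while flipping queue flags:
 *   2 -> SAME_COMP + SAME_FORCE : complete on the exact submitting CPU
 *   1 -> SAME_COMP only         : complete within the submitting CPU's group
 *   0 -> neither                : complete wherever the IRQ lands
 */
if (val == 2) {
	queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 1) {
	queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
} else {
	queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}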