
Commit d2dd328b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (27 commits)
  block: make blk_init_free_list and elevator_init idempotent
  block: avoid unconditionally freeing previously allocated request_queue
  pipe: change /proc/sys/fs/pipe-max-pages to byte sized interface
  pipe: change the privilege required for growing a pipe beyond system max
  pipe: adjust minimum pipe size to 1 page
  block: disable preemption before using sched_clock()
  cciss: call BUG() earlier
  Preparing 8.3.8rc2
  drbd: Reduce verbosity
  drbd: use drbd specific ratelimit instead of global printk_ratelimit
  drbd: fix hang on local read errors while disconnected
  drbd: Removed the now empty w_io_error() function
  drbd: removed duplicated #includes
  drbd: improve usage of MSG_MORE
  drbd: need to set socket bufsize early to take effect
  drbd: improve network latency, TCP_QUICKACK
  drbd: Revert "drbd: Create new current UUID as late as possible"
  brd: support discard
  Revert "writeback: fix WB_SYNC_NONE writeback from umount"
  Revert "writeback: ensure that WB_SYNC_NONE writeback with sb pinned is sync"
  ...
Parents: c1518f12 1abec4fd
block/blk-core.c: +14 −6
@@ -467,6 +467,9 @@ static int blk_init_free_list(struct request_queue *q)
 {
 	struct request_list *rl = &q->rq;
 
+	if (unlikely(rl->rq_pool))
+		return 0;
+
 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 	rl->elvpriv = 0;
@@ -570,9 +573,17 @@ EXPORT_SYMBOL(blk_init_queue);
 struct request_queue *
 blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
-	struct request_queue *q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	struct request_queue *uninit_q, *q;
+
+	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+	if (!uninit_q)
+		return NULL;
+
+	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+	if (!q)
+		blk_cleanup_queue(uninit_q);
 
-	return blk_init_allocated_queue_node(q, rfn, lock, node_id);
+	return q;
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
@@ -592,10 +603,8 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 		return NULL;
 
 	q->node = node_id;
-	if (blk_init_free_list(q)) {
-		kmem_cache_free(blk_requestq_cachep, q);
+	if (blk_init_free_list(q))
 		return NULL;
-	}
 
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
@@ -618,7 +627,6 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 		return q;
 	}
 
-	blk_put_queue(q);
 	return NULL;
 }
 EXPORT_SYMBOL(blk_init_allocated_queue_node);
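A note on the blk-core.c hunks: blk_init_allocated_queue_node() may now be handed a queue it did not allocate, so on failure it no longer frees it (the kmem_cache_free() and blk_put_queue() calls are removed); cleanup belongs to whoever allocated the queue. blk_init_queue_node() is restructured into an explicit allocate-then-initialize sequence, and blk_init_free_list() returns early once rq_pool exists, so initialization is safe to re-attempt on the same queue. A minimal sketch of the ownership rule for an outside caller (names come from the hunks; rfn, lock and node_id stand for the caller's usual parameters, and the sequence deliberately mirrors the new blk_init_queue_node() body):

	struct request_queue *uninit_q, *q;

	uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
	if (!uninit_q)
		return NULL;			/* nothing allocated, nothing to free */

	q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
	if (!q)
		blk_cleanup_queue(uninit_q);	/* init failed: release our own allocation */
	return q;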
block/cfq-iosched.c: +79 −22
@@ -64,6 +64,9 @@ static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
+static DEFINE_SPINLOCK(cic_index_lock);
+static DEFINE_IDA(cic_index_ida);
+
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
@@ -271,6 +274,7 @@ struct cfq_data {
 	unsigned int cfq_latency;
 	unsigned int cfq_group_isolation;
 
+	unsigned int cic_index;
 	struct list_head cic_list;
 
 	/*
@@ -430,6 +434,24 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
 	cic->cfqq[is_sync] = cfqq;
 }
 
+#define CIC_DEAD_KEY	1ul
+#define CIC_DEAD_INDEX_SHIFT	1
+
+static inline void *cfqd_dead_key(struct cfq_data *cfqd)
+{
+	return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
+}
+
+static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
+{
+	struct cfq_data *cfqd = cic->key;
+
+	if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
+		return NULL;
+
+	return cfqd;
+}
+
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
@@ -2510,11 +2532,12 @@ static void cfq_cic_free(struct cfq_io_context *cic)
 static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	unsigned long flags;
+	unsigned long dead_key = (unsigned long) cic->key;
 
-	BUG_ON(!cic->dead_key);
+	BUG_ON(!(dead_key & CIC_DEAD_KEY));
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	radix_tree_delete(&ioc->radix_root, cic->dead_key);
+	radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2537,15 +2560,10 @@ static void cfq_free_io_context(struct io_context *ioc)
 	__call_for_each_cic(ioc, cic_free_func);
 }
 
-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static void cfq_put_cooperator(struct cfq_queue *cfqq)
 {
 	struct cfq_queue *__cfqq, *next;
 
-	if (unlikely(cfqq == cfqd->active_queue)) {
-		__cfq_slice_expired(cfqd, cfqq, 0);
-		cfq_schedule_dispatch(cfqd);
-	}
-
 	/*
 	 * If this queue was scheduled to merge with another queue, be
 	 * sure to drop the reference taken on that queue (and others in
@@ -2561,6 +2579,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		cfq_put_queue(__cfqq);
 		__cfqq = next;
 	}
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	if (unlikely(cfqq == cfqd->active_queue)) {
+		__cfq_slice_expired(cfqd, cfqq, 0);
+		cfq_schedule_dispatch(cfqd);
+	}
+
+	cfq_put_cooperator(cfqq);
 
 	cfq_put_queue(cfqq);
 }
@@ -2573,11 +2601,10 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	list_del_init(&cic->queue_list);
 
 	/*
-	 * Make sure key == NULL is seen for dead queues
+	 * Make sure dead mark is seen for dead queues
 	 */
 	smp_wmb();
-	cic->dead_key = (unsigned long) cic->key;
-	cic->key = NULL;
+	cic->key = cfqd_dead_key(cfqd);
 
 	if (ioc->ioc_data == cic)
 		rcu_assign_pointer(ioc->ioc_data, NULL);
@@ -2596,7 +2623,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
 static void cfq_exit_single_io_context(struct io_context *ioc,
 				       struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 
 	if (cfqd) {
 		struct request_queue *q = cfqd->queue;
@@ -2609,7 +2636,7 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
 		 * race between exiting task and queue
 		 */
 		smp_read_barrier_depends();
-		if (cic->key)
+		if (cic->key == cfqd)
 			__cfq_exit_single_io_context(cfqd, cic);
 
 		spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2689,7 +2716,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
 static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	struct cfq_queue *cfqq;
 	unsigned long flags;
 
@@ -2746,7 +2773,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
 {
 	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
-	struct cfq_data *cfqd = cic->key;
+	struct cfq_data *cfqd = cic_to_cfqd(cic);
 	unsigned long flags;
 	struct request_queue *q;
 
@@ -2883,12 +2910,13 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
 	unsigned long flags;
 
 	WARN_ON(!list_empty(&cic->queue_list));
+	BUG_ON(cic->key != cfqd_dead_key(cfqd));
 
 	spin_lock_irqsave(&ioc->lock, flags);
 
 	BUG_ON(ioc->ioc_data == cic);
 
-	radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+	radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
 	hlist_del_rcu(&cic->cic_list);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 
@@ -2900,7 +2928,6 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
 	struct cfq_io_context *cic;
 	unsigned long flags;
-	void *k;
 
 	if (unlikely(!ioc))
 		return NULL;
@@ -2917,13 +2944,11 @@ cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 	}
 
 	do {
-		cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
+		cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
 		rcu_read_unlock();
 		if (!cic)
			break;
-		/* ->key must be copied to avoid race with cfq_exit_queue() */
-		k = cic->key;
-		if (unlikely(!k)) {
+		if (unlikely(cic->key != cfqd)) {
 			cfq_drop_dead_cic(cfqd, ioc, cic);
 			rcu_read_lock();
 			continue;
@@ -2956,7 +2981,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 
 		spin_lock_irqsave(&ioc->lock, flags);
 		ret = radix_tree_insert(&ioc->radix_root,
-						(unsigned long) cfqd, cic);
+						cfqd->cic_index, cic);
 		if (!ret)
 			hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
 		spin_unlock_irqrestore(&ioc->lock, flags);
@@ -3516,6 +3541,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 	}
 
 	cic_set_cfqq(cic, NULL, 1);
+
+	cfq_put_cooperator(cfqq);
+
 	cfq_put_queue(cfqq);
 	return NULL;
 }
@@ -3708,10 +3736,32 @@ static void cfq_exit_queue(struct elevator_queue *e)
 
 	cfq_shutdown_timer_wq(cfqd);
 
+	spin_lock(&cic_index_lock);
+	ida_remove(&cic_index_ida, cfqd->cic_index);
+	spin_unlock(&cic_index_lock);
+
 	/* Wait for cfqg->blkg->key accessors to exit their grace periods. */
 	call_rcu(&cfqd->rcu, cfq_cfqd_free);
 }
 
+static int cfq_alloc_cic_index(void)
+{
+	int index, error;
+
+	do {
+		if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
+			return -ENOMEM;
+
+		spin_lock(&cic_index_lock);
+		error = ida_get_new(&cic_index_ida, &index);
+		spin_unlock(&cic_index_lock);
+		if (error && error != -EAGAIN)
+			return error;
+	} while (error);
+
+	return index;
+}
+
 static void *cfq_init_queue(struct request_queue *q)
 {
 	struct cfq_data *cfqd;
@@ -3719,10 +3769,16 @@ static void *cfq_init_queue(struct request_queue *q)
 	struct cfq_group *cfqg;
 	struct cfq_rb_root *st;
 
+	i = cfq_alloc_cic_index();
+	if (i < 0)
+		return NULL;
+
 	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (!cfqd)
 		return NULL;
 
+	cfqd->cic_index = i;
+
 	/* Init root service tree */
 	cfqd->grp_service_tree = CFQ_RB_ROOT;
 
@@ -3984,6 +4040,7 @@ static void __exit cfq_exit(void)
 	 */
 	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
+	ida_destroy(&cic_index_ida);
 	cfq_slab_kill();
 }
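The CIC_DEAD_KEY scheme above folds the old key/dead_key field pair into one tagged value: a live cic->key holds a cfq_data pointer, which is word-aligned and so has a clear low bit, while a dead key holds (cic_index << 1) | 1, which can never alias a valid pointer. That is also why the IDA-allocated cic_index replaces the cfqd pointer as the radix-tree key: it lets cic_free_func() recover the key from the dead value alone. A small worked example of the encoding (macros as defined in the hunk; the index value 5 is purely illustrative):

	#define CIC_DEAD_KEY		1ul
	#define CIC_DEAD_INDEX_SHIFT	1

	unsigned long index = 5;	/* hypothetical cfqd->cic_index */

	/* Encode, as cfqd_dead_key() does: (5 << 1) | 1 == 0xb. */
	void *dead = (void *)(index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);

	/* Detect, as cic_to_cfqd() does: a set low bit marks the cic dead. */
	int is_dead = (unsigned long)dead & CIC_DEAD_KEY;			/* 1 */

	/* Decode, as cic_free_func() does, recovering the radix-tree index. */
	unsigned long idx = (unsigned long)dead >> CIC_DEAD_INDEX_SHIFT;	/* 5 */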

block/elevator.c: +5 −3
@@ -242,9 +242,11 @@ int elevator_init(struct request_queue *q, char *name)
 {
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
-	int ret = 0;
 	void *data;
 
+	if (unlikely(q->elevator))
+		return 0;
+
 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;
 	q->end_sector = 0;
@@ -284,7 +286,7 @@ int elevator_init(struct request_queue *q, char *name)
 	}
 
 	elevator_attach(q, eq, data);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(elevator_init);
 
@@ -1097,7 +1099,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
 	struct elevator_type *__e;
 	int len = 0;
 
-	if (!q->elevator)
+	if (!q->elevator || !blk_queue_stackable(q))
 		return sprintf(name, "none\n");
 
 	elv = e->elevator_type;
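The elevator_init() guard mirrors the blk_init_free_list() change above: both become idempotent because blk_init_allocated_queue_node() may now run against a queue that already completed part of its initialization. Since the removed `ret` variable only ever held 0, the function now returns the literal. Illustrative call sequence (not from this commit):

	elevator_init(q, NULL);		/* attaches the default elevator */
	elevator_init(q, NULL);		/* no-op: q->elevator is already set, returns 0 */

The elv_iosched_show() hunk is related housekeeping: queues that are not request-stackable now report "none" through sysfs as well.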
drivers/block/brd.c: +52 −1
@@ -133,6 +133,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+	pgoff_t idx;
+
+	spin_lock(&brd->brd_lock);
+	idx = sector >> PAGE_SECTORS_SHIFT;
+	page = radix_tree_delete(&brd->brd_pages, idx);
+	spin_unlock(&brd->brd_lock);
+	if (page)
+		__free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+
+	page = brd_lookup_page(brd, sector);
+	if (page)
+		clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -189,6 +211,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+			sector_t sector, size_t n)
+{
+	while (n >= PAGE_SIZE) {
+		/*
+		 * Don't want to actually discard pages here because
+		 * re-allocating the pages can result in writeback
+		 * deadlocks under heavy load.
+		 */
+		if (0)
+			brd_free_page(brd, sector);
+		else
+			brd_zero_page(brd, sector);
+		sector += PAGE_SIZE >> SECTOR_SHIFT;
+		n -= PAGE_SIZE;
+	}
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -300,6 +340,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 						get_capacity(bdev->bd_disk))
 		goto out;
 
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+		err = 0;
+		discard_from_brd(brd, sector, bio->bi_size);
+		goto out;
+	}
+
 	rw = bio_rw(bio);
 	if (rw == READA)
 		rw = READ;
@@ -437,6 +483,11 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
+	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+	brd->brd_queue->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
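With these hunks brd advertises discard support and handles BIO_RW_DISCARD bios page by page. Because it reports discard_zeroes_data = 1 and discard_from_brd() zeroes the backing pages rather than freeing them (the in-code comment explains the writeback-deadlock concern with re-allocation), a discarded range reads back as zeroes. The loop only covers whole pages: each iteration advances PAGE_SIZE >> SECTOR_SHIFT sectors (8 sectors with 4 KiB pages and 512-byte sectors) and any sub-page tail is left untouched. One way to exercise the path from userspace is the BLKDISCARD ioctl; a sketch, with the device path and length as examples and error handling elided:

	#include <stdint.h>
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* BLKDISCARD */

	int main(void)
	{
		int fd = open("/dev/ram0", O_WRONLY);
		uint64_t range[2] = { 0, 1 << 20 };	/* start and length, in bytes */

		/* Reaches brd_make_request() as a discard bio; the range
		 * subsequently reads back as zeroes. */
		return ioctl(fd, BLKDISCARD, range);
	}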
drivers/block/cciss_scsi.c: +1 −1
@@ -188,11 +188,11 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
 
 	sa = h->scsi_ctlr;
 	stk = &sa->cmd_stack;
+	stk->top++;
 	if (stk->top >= CMD_STACK_SIZE) {
 		printk("cciss: scsi_cmd_free called too many times.\n");
 		BUG();
 	}
-	stk->top++;
 	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
 }
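Why the increment moves: with the old ordering, a call entering with stk->top == CMD_STACK_SIZE - 1 passed the bounds check, bumped top to CMD_STACK_SIZE, and wrote stk->elem[CMD_STACK_SIZE] one slot past the end of the array; BUG() only fired on the next call, after the corruption. Incrementing first lets the check trap the overflowing call itself:

	/* Illustrative trace, assuming a hypothetical CMD_STACK_SIZE of 4
	 * and entry with stk->top == 3:
	 * old order: check(3 >= 4) false -> top = 4 -> elem[4] written out of bounds
	 * new order: top = 4 -> check(4 >= 4) true -> BUG() before any write
	 */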
