
Commit bd5d435a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: Skip I/O merges when disabled
  block: add large command support
  block: replace sizeof(rq->cmd) with BLK_MAX_CDB
  ide: use blk_rq_init() to initialize the request
  block: use blk_rq_init() to initialize the request
  block: rename and export rq_init()
  block: no need to initialize rq->cmd with blk_get_request
  block: no need to initialize rq->cmd in prepare_flush_fn hook
  block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
  block/elevator.c:elv_rq_merge_ok() mustn't be inline
  block: make queue flags non-atomic
  block: add dma alignment and padding support to blk_rq_map_kern
  unexport blk_max_pfn
  ps3disk: Remove superfluous cast
  block: make rq_init() do a full memset()
  relay: fix splice problem
parents fee4b19f ac9fafa1
+3 −8
@@ -53,7 +53,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
 	if (!q->ordseq)
 		return 0;
@@ -143,10 +143,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	blk_rq_init(q, rq);
 	rq->cmd_flags = REQ_HARDBARRIER;
-	rq_init(q, rq);
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
@@ -167,14 +165,11 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
-	rq->cmd_flags = 0;
-	rq_init(q, rq);
+	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
 	if (q->ordered & QUEUE_ORDERED_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	rq->elevator_private = NULL;
-	rq->elevator_private2 = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
+34 −41
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+	memset(rq, 0, sizeof(*rq));
+
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->donelist);
 	rq->q = q;
 	rq->sector = rq->hard_sector = (sector_t) -1;
-	rq->nr_sectors = rq->hard_nr_sectors = 0;
-	rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-	rq->bio = rq->biotail = NULL;
 	INIT_HLIST_NODE(&rq->hash);
 	RB_CLEAR_NODE(&rq->rb_node);
-	rq->rq_disk = NULL;
-	rq->nr_phys_segments = 0;
-	rq->nr_hw_segments = 0;
-	rq->ioprio = 0;
-	rq->special = NULL;
-	rq->buffer = NULL;
+	rq->cmd = rq->__cmd;
 	rq->tag = -1;
-	rq->errors = 0;
 	rq->ref_count = 1;
-	rq->cmd_len = 0;
-	memset(rq->cmd, 0, sizeof(rq->cmd));
-	rq->data_len = 0;
-	rq->extra_len = 0;
-	rq->sense_len = 0;
-	rq->data = NULL;
-	rq->sense = NULL;
-	rq->end_io = NULL;
-	rq->end_io_data = NULL;
-	rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
 			  unsigned int nbytes, int error)
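The rename from rq_init() to blk_rq_init(), together with the EXPORT_SYMBOL and the full memset(), means callers outside blk-core.c (the IDE change in this series, for instance) can reset a request they own instead of open-coding field assignments. A minimal sketch of such a caller, assuming kernel context; the function name and flag choice are made up for illustration:

#include <linux/blkdev.h>

/*
 * Hypothetical driver-side reuse of a statically owned request, in the
 * spirit of queue_flush() in the hunk further up: blk_rq_init() now zeroes
 * the whole request, so any state the caller cares about must be set
 * afterwards.
 */
static void example_reset_private_rq(struct request_queue *q, struct request *rq)
{
	blk_rq_init(q, rq);			/* full memset() plus list/tag init */
	rq->cmd_flags = REQ_HARDBARRIER;	/* caller-specific state goes after init */
}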
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
 	if (blk_pc_request(rq)) {
 		printk(KERN_INFO "  cdb: ");
-		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+		for (bit = 0; bit < BLK_MAX_CDB; bit++)
			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
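The sizeof(rq->cmd) to BLK_MAX_CDB switch falls out of the large-command work in this merge: rq->cmd is now a pointer that blk_rq_init() aims at the inline rq->__cmd[BLK_MAX_CDB] array, so sizeof() on it no longer yields the CDB length. As I read the series, a driver that needs a longer CDB can point rq->cmd at its own buffer; a hedged sketch (helper name and allocation policy are illustrative, not part of this merge):

#include <linux/blkdev.h>
#include <linux/slab.h>

/* Presumed usage: swap in a bigger CDB buffer when BLK_MAX_CDB is too small. */
static int example_set_long_cdb(struct request *rq, unsigned int cdb_len, gfp_t gfp)
{
	if (cdb_len <= BLK_MAX_CDB)
		return 0;			/* inline rq->__cmd is large enough */

	rq->cmd = kzalloc(cdb_len, gfp);	/* caller owns and must free this buffer */
	if (!rq->cmd)
		return -ENOMEM;
	rq->cmd_len = cdb_len;
	return 0;
}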
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
 		return 0;
 
+	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
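queue_flag_clear() here (and queue_flag_set()/queue_flag_set_unlocked() in later hunks) comes from the "block: make queue flags non-atomic" change; the helper definitions are not part of this diff. A rough sketch of what they plausibly look like, assuming q->queue_flags is only modified with q->queue_lock held and that the _unlocked variants serve paths serialized some other way:

#include <linux/blkdev.h>

/* Illustrative stand-ins; the real helpers live in blkdev.h. */
static inline void example_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);	/* non-atomic: queue_lock assumed held */
}

static inline void example_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);	/* non-atomic: queue_lock assumed held */
}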
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		queue_flag_set(QUEUE_FLAG_REENTER, q);
 		q->request_fn(q);
-		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
 		blk_plug_device(q);
 		kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
 	blk_remove_plug(q);
-	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+	queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q:	The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
 
 	/*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			queue_flag_set(QUEUE_FLAG_REENTER, q);
 			q->request_fn(q);
-			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
 			blk_plug_device(q);
 			kblockd_schedule_work(&q->unplug_work);
 		}
 	}
+}
+EXPORT_SYMBOL(__blk_run_queue);
 
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
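Splitting the locked part out as __blk_run_queue() lets callers that already hold q->queue_lock restart the queue without dropping and retaking the lock. A hedged caller-side sketch; the completion logic is elided and the function name is made up:

#include <linux/blkdev.h>

static void example_complete_and_restart(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* ... finish requests, update driver state ... */
	__blk_run_queue(q);		/* queue_lock is already held here */
	spin_unlock_irqrestore(q->queue_lock, flags);
}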
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
-	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
 	mutex_unlock(&q->sysfs_lock);
 
 	if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 	if (!rq)
 		return NULL;
 
+	blk_rq_init(q, rq);
+
 	/*
 	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	rq_init(q, rq);
-
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
+20 −1
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf:	the kernel buffer
  * @len:	length of user data
  * @gfp_mask:	memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 		    unsigned int len, gfp_t gfp_mask)
 {
+	unsigned long kaddr;
+	unsigned int alignment;
+	int reading = rq_data_dir(rq) == READ;
+	int do_copy = 0;
 	struct bio *bio;
 
 	if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;
 
-	bio = bio_map_kern(q, kbuf, len, gfp_mask);
+	kaddr = (unsigned long)kbuf;
+	alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+	do_copy = ((kaddr & alignment) || (len & alignment));
+
+	if (do_copy)
+		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+	else
+		bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	if (rq_data_dir(rq) == WRITE)
 		bio->bi_rw |= (1 << BIO_RW);
 
+	if (do_copy)
+		rq->cmd_flags |= REQ_COPY_USER;
+
 	blk_rq_bio_prep(q, rq, bio);
 	blk_queue_bounce(q, &rq->bio);
 	rq->buffer = rq->data = NULL;
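With the alignment check inside blk_rq_map_kern(), callers no longer have to ensure their kernel buffer satisfies the queue's DMA alignment and padding: a misaligned buffer is bounced through bio_copy_kern() and the request is flagged REQ_COPY_USER. A rough caller-side sketch, assuming the request has been set up elsewhere (the function name is illustrative):

#include <linux/blkdev.h>

static int example_map_kernel_buffer(struct request_queue *q, struct request *rq,
				     void *buf, unsigned int len)
{
	/* buf may be arbitrarily aligned; map-vs-copy is decided internally now */
	return blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
}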
+3 −3
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
 	if (!rq->bio)
 		return;
 
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 	hw_seg_size = seg_size = 0;
 	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
 	rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
 {
-	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
 	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 	int nsegs, cluster;
 
 	nsegs = 0;
-	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
 	/*
 	 * for each bio in rq
+1 −2
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+		queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 