
Commit f6c42766 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  libata: implement drain buffers
  libata: eliminate the home grown dma padding in favour of
  block: clear drain buffer if draining for write command
  block: implement request_queue->dma_drain_needed
  block: add request->raw_data_len
  block: update bio according to DMA alignment padding
  libata: update ATAPI overflow draining
  elevator: make elevator_get() attempt to load the appropriate module
  cfq-iosched: add hlist for browsing parallel to the radix tree
  block: make blk_rq_map_user() clear ->bio if it unmaps it
  fs/block_dev.c: remove #if 0'ed code
  make struct def_blk_aops static
  make blk_settings_init() static
  make blk_ioc_init() static
  make blk-core.c:request_cachep static again
parents 9ef38eaf fa2fc7f4
+3 −1
@@ -38,7 +38,7 @@ static int __make_request(struct request_queue *q, struct bio *bio);
 /*
  * For the allocated request tables
  */
-struct kmem_cache *request_cachep;
+static struct kmem_cache *request_cachep;
 
 /*
  * For queue allocation
@@ -127,6 +127,7 @@ void rq_init(struct request_queue *q, struct request *rq)
 	rq->nr_hw_segments = 0;
 	rq->ioprio = 0;
 	rq->special = NULL;
+	rq->raw_data_len = 0;
 	rq->buffer = NULL;
 	rq->tag = -1;
 	rq->errors = 0;
@@ -2015,6 +2016,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 	rq->hard_cur_sectors = rq->current_nr_sectors;
 	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
 	rq->buffer = bio_data(bio);
+	rq->raw_data_len = bio->bi_size;
 	rq->data_len = bio->bi_size;
 
 	rq->bio = rq->biotail = bio;
+16 −21
@@ -17,17 +17,13 @@ static struct kmem_cache *iocontext_cachep;
 
 static void cfq_dtor(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
-
-	/*
-	 * We don't have a specific key to lookup with, so use the gang
-	 * lookup to just retrieve the first item stored. The cfq exit
-	 * function will iterate the full tree, so any member will do.
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	if (r > 0)
-		cic[0]->dtor(ioc);
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
+
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->dtor(ioc);
+	}
 }
 
 /*
@@ -57,18 +53,16 @@ EXPORT_SYMBOL(put_io_context);
 
 static void cfq_exit(struct io_context *ioc)
 {
-	struct cfq_io_context *cic[1];
-	int r;
-
 	rcu_read_lock();
-	/*
-	 * See comment for cfq_dtor()
-	 */
-	r = radix_tree_gang_lookup(&ioc->radix_root, (void **) cic, 0, 1);
-	rcu_read_unlock();
 
-	if (r > 0)
-		cic[0]->exit(ioc);
+	if (!hlist_empty(&ioc->cic_list)) {
+		struct cfq_io_context *cic;
+
+		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+								cic_list);
+		cic->exit(ioc);
+	}
+	rcu_read_unlock();
 }
 
 /* Called by the exitting task */
@@ -105,6 +99,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+		INIT_HLIST_HEAD(&ret->cic_list);
 		ret->ioc_data = NULL;
 	}
 
@@ -176,7 +171,7 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
 }
 EXPORT_SYMBOL(copy_io_context);
 
-int __init blk_ioc_init(void)
+static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
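
The dtor/exit paths above rely on a small idiom worth spelling out: list_entry() is just container_of(), so it also works on an hlist_node member, which is how the first cfq_io_context is pulled off ioc->cic_list.first. Below is a minimal user-space sketch of the same first-entry lookup; struct my_item and the field values are invented for illustration and are not kernel code.

#include <stddef.h>
#include <stdio.h>

/* container_of / list_entry reduced to its definition */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* invented stand-in for struct cfq_io_context with its cic_list member */
struct my_item {
	int id;
	struct hlist_node cic_list;
};

int main(void)
{
	struct my_item item = { .id = 42 };
	struct hlist_head head = { .first = &item.cic_list };

	if (head.first) {	/* the !hlist_empty() check */
		struct my_item *first =
			container_of(head.first, struct my_item, cic_list);
		printf("first item id = %d\n", first->id);	/* prints 42 */
	}
	return 0;
}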
+20 −0
@@ -19,6 +19,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;
 
+		rq->raw_data_len += bio->bi_size;
 		rq->data_len += bio->bi_size;
 	}
 	return 0;
@@ -139,10 +140,29 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
 		ubuf += ret;
 	}
 
+	/*
+	 * __blk_rq_map_user() copies the buffers if starting address
+	 * or length isn't aligned.  As the copied buffer is always
+	 * page aligned, we know that there's enough room for padding.
+	 * Extend the last bio and update rq->data_len accordingly.
+	 *
+	 * On unmap, bio_uncopy_user() will use unmodified
+	 * bio_map_data pointed to by bio->bi_private.
+	 */
+	if (len & queue_dma_alignment(q)) {
+		unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
+		struct bio *bio = rq->biotail;
+
+		bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
+		bio->bi_size += pad_len;
+		rq->data_len += pad_len;
+	}
+
 	rq->buffer = rq->data = NULL;
 	return 0;
 unmap_rq:
 	blk_rq_unmap_user(bio);
+	rq->bio = NULL;
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_user);
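
The padding arithmetic in the hunk above is easy to misread, so here is a stand-alone sketch of the same computation: only the expression (align_mask & ~len) + 1 comes from blk_rq_map_user(); the helper name, the test values and the user-space harness are mine.

#include <assert.h>

/* Mirrors the pad_len computation; align_mask plays the role of
 * queue_dma_alignment(q), e.g. 3 for 4-byte DMA alignment. */
static unsigned int pad_to_dma_alignment(unsigned int len, unsigned int align_mask)
{
	if (!(len & align_mask))
		return 0;			/* already aligned, no padding */
	return (align_mask & ~len) + 1;		/* bytes to the next boundary  */
}

int main(void)
{
	/* 513 bytes with a 4-byte alignment mask (3) need 3 pad bytes -> 516 */
	assert(pad_to_dma_alignment(513, 3) == 3);
	/* 516 is already aligned, so no padding is appended */
	assert(pad_to_dma_alignment(516, 3) == 0);
	return 0;
}

For a power-of-two mask m, (m & ~len) + 1 equals (m + 1) - (len & m), i.e. exactly the distance from len up to the next aligned boundary, which is why the last bio vector, bio->bi_size and rq->data_len are all grown by that amount.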
+5 −1
@@ -220,7 +220,10 @@ new_segment:
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+		if (rq->cmd_flags & REQ_RW)
+			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
@@ -228,6 +231,7 @@ new_segment:
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
 		nsegs++;
+		rq->data_len += q->dma_drain_size;
 	}
 
 	if (sg)
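
Taken together with the blk-map.c hunks, the effect is that rq->data_len grows to cover both alignment padding and the appended drain segment, while rq->raw_data_len keeps tracking only the bytes the submitter asked for. A worked example with made-up numbers (a 255-byte transfer, 4-byte DMA alignment, a 512-byte drain buffer), not values taken from any real device:

#include <stdio.h>

/* Illustrative only: plain integers stand in for the request fields. */
int main(void)
{
	unsigned int raw_data_len = 255;   /* rq->raw_data_len: bytes the caller asked for */
	unsigned int data_len     = 255;   /* rq->data_len: starts out identical           */

	data_len += 1;                     /* pad_len added by blk_rq_map_user()           */
	data_len += 512;                   /* q->dma_drain_size added by blk_rq_map_sg()   */

	printf("raw_data_len=%u data_len=%u\n", raw_data_len, data_len); /* 255 vs 768 */
	return 0;
}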
+6 −3
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q:  the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf:	physically contiguous buffer
  * @size:	size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
 * device can support otherwise there won't be room for the drain
 * buffer.
 */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-				unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
 
@@ -386,7 +389,7 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-int __init blk_settings_init(void)
+static int __init blk_settings_init(void)
 {
 	blk_max_low_pfn = max_low_pfn - 1;
 	blk_max_pfn = max_pfn - 1;
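
For drivers, the visible change is the extra callback argument to blk_queue_dma_drain(). The rough sketch below shows how a driver might wire it up under the new signature; the callback shape (one struct request * argument, non-zero return when draining is needed) is inferred from q->dma_drain_needed(rq) and the kernel-doc above, while MY_DRAIN_SIZE, my_drain_needed() and the blk_pc_request() test are illustrative guesses rather than code from this merge.

#include <linux/blkdev.h>
#include <linux/slab.h>

#define MY_DRAIN_SIZE	512	/* made-up size for the drain buffer */

static int my_drain_needed(struct request *rq)
{
	/* e.g. only drain packet commands; purely illustrative policy */
	return blk_pc_request(rq);
}

static int my_setup_drain(struct request_queue *q)
{
	void *buf = kmalloc(MY_DRAIN_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* fails with -EINVAL unless the queue allows at least two segments,
	 * since one hw/phys segment is reserved for the drain */
	return blk_queue_dma_drain(q, my_drain_needed, buf, MY_DRAIN_SIZE);
}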