
Commit 97c68d00 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  cfq-iosched: cache prio_tree root in cfqq->p_root
  cfq-iosched: fix bug with aliased request and cooperation detection
  cfq-iosched: clear ->prio_trees[] on cfqd alloc
  block: fix intermittent dm timeout based oops
  umem: fix request_queue lock warning
  block: simplify I/O stat accounting
  pktcdvd.h should include mempool.h
  cfq-iosched: use the default seek distance when there aren't enough seek samples
  cfq-iosched: make seek_mean converge more quickly
  block: make blk_abort_queue() ignore non-request based devices
  block: include empty disks in /proc/diskstats
  bio: use bio_kmalloc() in copy/map functions
  bio: fix bio_kmalloc()
  block: fix queue bounce limit setting
  block: fix SG_IO vector request data length handling
  scatterlist: make sure sg_miter_next() doesn't return 0 sized mappings
parents 596a5c4e f2d1f0ae
+4 −2
@@ -643,7 +643,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -652,7 +652,7 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 
 	blk_rq_init(q, rq);
 
-	rq->cmd_flags = rw | REQ_ALLOCED;
+	rq->cmd_flags = flags | REQ_ALLOCED;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
@@ -792,6 +792,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (priv)
 		rl->elvpriv++;
 
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
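
The new blk_queue_io_stat() check above folds the queue's iostats setting into the request's cmd_flags at allocation time, so the accounting decision travels with the request. A minimal consumer-side sketch (illustrative only; the helper body here is an assumption about how accounting paths use the flag, not code from this merge):

/*
 * Illustrative sketch: once REQ_IO_STAT is latched into cmd_flags when
 * the request is allocated, accounting code can test the request itself
 * instead of re-reading the queue flag later.
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk && (rq->cmd_flags & REQ_IO_STAT);
}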
+4 −1
@@ -402,7 +402,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 
 	elv_merge_requests(q, req, next);
 
-	blk_account_io_merge(req);
+	/*
+	 * 'next' is going away, so update stats accordingly
+	 */
+	blk_account_io_merge(next);
 
 	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
 	if (blk_rq_cpu_valid(next))
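
For context, the merge accounting closes out the in-flight statistics for whichever request it is handed; since 'next' is the request being absorbed and freed by the merge, it is next's accounting (not req's) that has to be ended. A rough, simplified sketch of that accounting, not copied from this merge:

#include <linux/blkdev.h>
#include <linux/genhd.h>

/*
 * Simplified sketch: the request being folded away stops being
 * "in flight" on its partition, after the partition stats are
 * rounded up to the current tick.
 */
static void account_merge_sketch(struct request *rq)
{
	struct hd_struct *part;
	int cpu;

	cpu = part_stat_lock();
	part = disk_map_sector_rcu(rq->rq_disk, rq->sector);

	part_round_stats(cpu, part);
	part_dec_in_flight(part);

	part_stat_unlock();
}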
+11 −9
@@ -157,25 +157,27 @@ EXPORT_SYMBOL(blk_queue_make_request);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @dma_addr:   bus address limit
+ * @dma_mask: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_addr.
+ *    buffers for doing I/O to pages residing above @dma_mask.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 {
-	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
-	/* Assume anything <= 4GB can be handled by IOMMU.
-	   Actually some IOMMUs can handle everything, but I don't
-	   know of a way to test this here. */
-	if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	/*
+	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
+	 * some IOMMUs can handle everything, but I don't know of a
+	 * way to test this here.
+	 */
+	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
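
With the parameter now documented as a DMA mask (the highest address the device can reach) rather than a "bus address limit", a caller passes its mask directly. A hypothetical driver snippet, not part of this merge:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical example: a device that can only DMA below 4GB passes
 * its 32-bit mask; pages above that address get bounced.
 */
static void example_set_bounce_limit(struct request_queue *q)
{
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}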
+0 −4
@@ -209,14 +209,10 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
 	ssize_t ret = queue_var_store(&stats, page, count);
 
 	spin_lock_irq(q->queue_lock);
-	elv_quiesce_start(q);
-
 	if (stats)
 		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
 	else
 		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
-
-	elv_quiesce_end(q);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
+13 −0
@@ -211,6 +211,12 @@ void blk_abort_queue(struct request_queue *q)
 	struct request *rq, *tmp;
 	LIST_HEAD(list);
 
+	/*
+	 * Not a request based block device, nothing to abort
+	 */
+	if (!q->request_fn)
+		return;
+
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	elv_abort_queue(q);
@@ -224,6 +230,13 @@ void blk_abort_queue(struct request_queue *q)
 	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
 		blk_abort_request(rq);
 
+	/*
+	 * Occasionally, blk_abort_request() will return without
+	 * deleting the element from the list. Make sure we add those back
+	 * instead of leaving them on the local stack list.
+	 */
+	list_splice(&list, &q->timeout_list);
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 }
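
The new q->request_fn guard matters for stacking setups: bio-based devices such as dm targets have a make_request_fn but no request_fn and no elevator, so aborting them must be a no-op. A hypothetical caller sketch, not part of this merge, showing how such a queue might be reached:

#include <linux/blkdev.h>

/*
 * Hypothetical caller: abort outstanding requests on an underlying
 * block device. If the queue is bio based (q->request_fn == NULL),
 * blk_abort_queue() now simply returns instead of oopsing.
 */
static void abort_lower_device(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		blk_abort_queue(q);
}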