Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7a34912d authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  Revert "relay: fix splice problem"
  docbook: fix bio missing parameter
  block: use unitialized_var() in bio_alloc_bioset()
  block: avoid duplicate calls to get_part() in disk stat code
  cfq-iosched: make io priorities inherit CPU scheduling class as well as nice
  block: optimize generic_unplug_device()
  block: get rid of likely/unlikely predictions in merge logic
  vfs: splice remove_suid() cleanup
  cfq-iosched: fix RCU race in the cfq io_context destructor handling
  block: adjust tagging function queue bit locking
  block: sysfs store function needs to grab queue_lock and use queue_flag_*()
parents 0f1bce41 75065ff6
Loading
Loading
Loading
Loading
+15 −11
Original line number Diff line number Diff line
@@ -54,15 +54,16 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

static void drive_stat_acct(struct request *rq, int new_io)
{
	struct hd_struct *part;
	int rw = rq_data_dir(rq);

	if (!blk_fs_request(rq) || !rq->rq_disk)
		return;

	if (!new_io) {
		__all_stat_inc(rq->rq_disk, merges[rw], rq->sector);
	} else {
		struct hd_struct *part = get_part(rq->rq_disk, rq->sector);
	part = get_part(rq->rq_disk, rq->sector);
	if (!new_io)
		__all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector);
	else {
		disk_round_stats(rq->rq_disk);
		rq->rq_disk->in_flight++;
		if (part) {
@@ -253,10 +254,12 @@ EXPORT_SYMBOL(__generic_unplug_device);
 **/
/**
 * generic_unplug_device - fire a request queue
 * @q:    The &struct request_queue in question
 *
 * Description:
 *   Linux uses plugging to build bigger requests queues before letting
 *   the device have at them. If a queue is plugged, the I/O scheduler
 *   is still adding and merging requests on the queue. Once the queue
 *   gets unplugged, the request_fn defined for the queue is invoked and
 *   transfers started.
 *
 *   The plugged check is done outside the lock on purpose: taking the
 *   queue lock just to discover the queue is not plugged would be
 *   wasted work on the common fast path.
 **/
void generic_unplug_device(struct request_queue *q)
{
	if (!blk_queue_plugged(q))
		return;

	spin_lock_irq(q->queue_lock);
	__generic_unplug_device(q);
	spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(generic_unplug_device);

static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
@@ -1536,9 +1539,10 @@ static int __end_that_request_first(struct request *req, int error,
	}

	if (blk_fs_request(req) && req->rq_disk) {
		struct hd_struct *part = get_part(req->rq_disk, req->sector);
		const int rw = rq_data_dir(req);

		all_stat_add(req->rq_disk, sectors[rw],
		all_stat_add(req->rq_disk, part, sectors[rw],
				nr_bytes >> 9, req->sector);
	}

@@ -1725,8 +1729,8 @@ static void end_that_request_last(struct request *req, int error)
		const int rw = rq_data_dir(req);
		struct hd_struct *part = get_part(disk, req->sector);

		__all_stat_inc(disk, ios[rw], req->sector);
		__all_stat_add(disk, ticks[rw], duration, req->sector);
		__all_stat_inc(disk, part, ios[rw], req->sector);
		__all_stat_add(disk, part, ticks[rw], duration, req->sector);
		disk_round_stats(disk);
		disk->in_flight--;
		if (part) {
+1 −1
Original line number Diff line number Diff line
@@ -41,8 +41,8 @@ int put_io_context(struct io_context *ioc)
		rcu_read_lock();
		if (ioc->aic && ioc->aic->dtor)
			ioc->aic->dtor(ioc->aic);
		rcu_read_unlock();
		cfq_dtor(ioc);
		rcu_read_unlock();

		kmem_cache_free(iocontext_cachep, ioc);
		return 1;
+6 −6
Original line number Diff line number Diff line
@@ -149,9 +149,9 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
				 struct bio *nxt)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
	if (!bio_flagged(nxt, BIO_SEG_VALID))
		blk_recount_segments(q, nxt);
	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size))
@@ -312,9 +312,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
			q->last_merge = NULL;
		return 0;
	}
	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
@@ -352,9 +352,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);
	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	    !BIOVEC_VIRT_OVERSIZE(len)) {
+4 −2
Original line number Diff line number Diff line
@@ -146,11 +146,13 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
	       set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
	       clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);

	spin_unlock_irq(q->queue_lock);
	return ret;
}

+6 −3
Original line number Diff line number Diff line
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear(QUEUE_FLAG_QUEUED, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
 **/
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function. Only clears the QUEUED flag; the tag map
 *	itself is released elsewhere (see __blk_queue_free_tags()).
 *
 *	Uses the _unlocked variant per this commit ("block: adjust
 *	tagging function queue bit locking") — the caller does not hold
 *	the queue lock here, so the plain queue_flag_clear() (which
 *	asserts the lock is held) must not be used.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

@@ -171,6 +171,9 @@ EXPORT_SYMBOL(blk_init_tags);
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
@@ -197,7 +200,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set(QUEUE_FLAG_QUEUED, q);
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
Loading