Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 44148a66 authored by Linus Torvalds
Browse files
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-2.6-block:
  ide: always ensure that blk_delay_queue() is called if we have pending IO
  block: fix request sorting at unplug
  dm: improve block integrity support
  fs: export empty_aops
  ide: ide_requeue_and_plug() reinstate "always plug" behaviour
  blk-throttle: don't call xchg on bool
  ufs: remove unessecary blk_flush_plug
  block: make the flush insertion use the tail of the dispatch list
  block: get rid of elv_insert() interface
  block: dump request state on seeing a corrupted request completion
parents d0de4dc5 782b86e2
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -2163,7 +2163,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
	 * size, something has gone terribly wrong.
	 */
	if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
		printk(KERN_ERR "blk: request botched\n");
		blk_dump_rq_flags(req, "request botched");
		req->__data_len = blk_rq_cur_bytes(req);
	}

@@ -2665,7 +2665,7 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
	struct request *rqa = container_of(a, struct request, queuelist);
	struct request *rqb = container_of(b, struct request, queuelist);

	return !(rqa->q == rqb->q);
	return !(rqa->q <= rqb->q);
}

static void flush_plug_list(struct blk_plug *plug)
+3 −3
Original line number Diff line number Diff line
@@ -261,7 +261,7 @@ static bool blk_kick_flush(struct request_queue *q)
	q->flush_rq.end_io = flush_end_io;

	q->flush_pending_idx ^= 1;
	elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
	list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
	return true;
}

@@ -281,7 +281,7 @@ static void flush_data_end_io(struct request *rq, int error)
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from elv_insert() for %ELEVATOR_INSERT_FLUSH insertions.
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
@@ -312,7 +312,7 @@ void blk_insert_flush(struct request *rq)
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		list_add(&rq->queuelist, &q->queue_head);
		list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

+11 −1
Original line number Diff line number Diff line
@@ -30,6 +30,8 @@

static struct kmem_cache *integrity_cachep;

static const char *bi_unsupported_name = "unsupported";

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
@@ -358,6 +360,14 @@ static struct kobj_type integrity_ktype = {
	.release	= blk_integrity_release,
};

/**
 * blk_integrity_is_initialized - check whether a disk carries a real
 *	integrity profile
 * @disk:	disk to query
 *
 * Returns true only when an integrity profile has been registered for
 * @disk and its name is not the internal "unsupported" placeholder
 * (bi_unsupported_name) installed for templateless registrations.
 */
bool blk_integrity_is_initialized(struct gendisk *disk)
{
	struct blk_integrity *bi = blk_get_integrity(disk);

	if (bi == NULL || bi->name == NULL)
		return false;

	return strcmp(bi->name, bi_unsupported_name) != 0;
}
EXPORT_SYMBOL(blk_integrity_is_initialized);

/**
 * blk_integrity_register - Register a gendisk as being integrity-capable
 * @disk:	struct gendisk pointer to make integrity-aware
@@ -407,7 +417,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
		bi->get_tag_fn = template->get_tag_fn;
		bi->tag_size = template->tag_size;
	} else
		bi->name = "unsupported";
		bi->name = bi_unsupported_name;

	return 0;
}
+2 −2
Original line number Diff line number Diff line
@@ -77,7 +77,7 @@ struct throtl_grp {
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;
	int limits_changed;
};

struct throtl_data
@@ -102,7 +102,7 @@ struct throtl_data
	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
	int limits_changed;
};

enum tg_state_flags {
+15 −20
Original line number Diff line number Diff line
@@ -610,7 +610,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
@@ -655,12 +655,25 @@ void elv_quiesce_end(struct request_queue *q)
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

void elv_insert(struct request_queue *q, struct request *rq, int where)
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
@@ -722,24 +735,6 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
		BUG();
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
Loading