
Commit 857953d7 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull more block fixes from Jens Axboe:
 "As mentioned in the pull the other day, a few more fixes for this
  round, all related to the bio op changes in this series.

  Two fixes, and then a cleanup, renaming bio->bi_rw to bio->bi_opf.  I
  wanted to do that change right after or right before -rc1, so that
  risk of conflict was reduced.  I just rebased the series on top of
  current master, and no new ->bi_rw usage has snuck in"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: rename bio bi_rw to bi_opf
  target: iblock_execute_sync_cache() should use bio_set_op_attrs()
  mm: make __swap_writepage() use bio_set_op_attrs()
  block/mm: make bdev_ops->rw_page() take a bool for read/write
parents 635a4ba1 1eff9d32
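
The last three shortlog entries move callers onto helpers, notably bio_set_op_attrs(), instead of having them write the renamed field by hand. As a rough, self-contained sketch of the underlying idea, one bi_opf-style word carrying the operation in its low bits and modifier flags above them, using invented names and bit positions rather than the kernel's actual blk_types.h definitions:

#include <stdio.h>

/* Illustrative model only: invented names and bit layout, not the kernel's
 * blk_types.h definitions. One "opf" word carries the operation in its low
 * bits and modifier flags above them. */
#define DEMO_OP_BITS    3u
#define DEMO_OP_MASK    ((1u << DEMO_OP_BITS) - 1)
#define DEMO_F_SYNC     (1u << 3)
#define DEMO_F_FUA      (1u << 5)

enum { DEMO_OP_READ = 0, DEMO_OP_WRITE = 1, DEMO_OP_FLUSH = 2 };

struct demo_bio {
	unsigned int opf;   /* stands in for bio->bi_opf */
};

/* In the spirit of bio_set_op_attrs(): install op and flags in one go. */
static void demo_set_op_attrs(struct demo_bio *bio, unsigned int op,
			      unsigned int flags)
{
	bio->opf = (bio->opf & ~DEMO_OP_MASK) | (op & DEMO_OP_MASK) | flags;
}

int main(void)
{
	struct demo_bio bio = { 0 };

	demo_set_op_attrs(&bio, DEMO_OP_WRITE, DEMO_F_SYNC | DEMO_F_FUA);
	printf("op=%u sync=%d fua=%d\n",
	       bio.opf & DEMO_OP_MASK,
	       !!(bio.opf & DEMO_F_SYNC),
	       !!(bio.opf & DEMO_F_FUA));
	return 0;
}

Callers then test individual bits the way the hunks below do, for example (bio.opf & DEMO_F_FUA), without caring where the op itself sits in the word.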
+2 −2
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
  requests which haven't aged too much on the queue. Potentially this priority
  could even be exposed to applications in some manner, providing higher level
  tunability. Time based aging avoids starvation of lower priority
-  requests. Some bits in the bi_rw flags field in the bio structure are
+  requests. Some bits in the bi_opf flags field in the bio structure are
  intended to be used for this priority information.


@@ -432,7 +432,7 @@ struct bio {
       struct bio          *bi_next;    /* request queue link */
       struct block_device *bi_bdev;	/* target device */
       unsigned long       bi_flags;    /* status, command, etc */
-      unsigned long       bi_rw;       /* low bits: r/w, high: priority */
+      unsigned long       bi_opf;       /* low bits: r/w, high: priority */

       unsigned int	bi_vcnt;     /* how may bio_vec's */
       struct bvec_iter	bi_iter;	/* current index into bio_vec array */
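
The field comment above ("low bits: r/w, high: priority") echoes the earlier biodoc.txt paragraph about reserving some bits of the same word for priority hints. A hedged, standalone sketch of that layout idea only; the shift and width here are invented for illustration and are not the kernel's actual bit positions:

#include <stdio.h>

/* Invented layout for illustration: a 4-bit priority parked in the top of
 * the same flags word. Not the kernel's real bi_opf encoding. */
struct demo_bio {
	unsigned long opf;
};

#define DEMO_PRIO_SHIFT  28u
#define DEMO_PRIO_MASK   (0xfUL << DEMO_PRIO_SHIFT)

static void demo_set_prio(struct demo_bio *bio, unsigned long prio)
{
	bio->opf = (bio->opf & ~DEMO_PRIO_MASK) |
		   ((prio << DEMO_PRIO_SHIFT) & DEMO_PRIO_MASK);
}

static unsigned long demo_get_prio(const struct demo_bio *bio)
{
	return (bio->opf & DEMO_PRIO_MASK) >> DEMO_PRIO_SHIFT;
}

int main(void)
{
	struct demo_bio bio = { 0 };

	demo_set_prio(&bio, 5);
	printf("prio=%lu\n", demo_get_prio(&bio));
	return 0;
}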
+1 −1
@@ -42,7 +42,7 @@ Optional feature parameters:
    <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
		 'w' is incompatible with drop_writes.
    <value>: The value (from 0-255) to write.
-   <flags>: Perform the replacement only if bio->bi_rw has all the
+   <flags>: Perform the replacement only if bio->bi_opf has all the
	     selected flags set.

Examples:
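
One note on the <flags> parameter described above: corruption is applied only when bio->bi_opf has every selected flag set, the usual "all requested bits present" mask test rather than an any-bit test. A minimal standalone illustration, with placeholder flag values rather than the kernel's REQ_* constants:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag values for illustration, not the kernel's REQ_* bits. */
#define DEMO_F_SYNC  (1u << 3)
#define DEMO_F_FUA   (1u << 5)

/* True only if every bit in 'wanted' is also set in 'opf'. */
static bool has_all_flags(unsigned int opf, unsigned int wanted)
{
	return (opf & wanted) == wanted;
}

int main(void)
{
	unsigned int opf = DEMO_F_SYNC | DEMO_F_FUA;

	printf("%d\n", has_all_flags(opf, DEMO_F_SYNC | DEMO_F_FUA));        /* 1: both present */
	printf("%d\n", has_all_flags(DEMO_F_SYNC, DEMO_F_SYNC | DEMO_F_FUA)); /* 0: FUA missing */
	return 0;
}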
+1 −1
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
-	bio->bi_rw |= REQ_INTEGRITY;
+	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
+3 −3
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
-	bio->bi_rw = bio_src->bi_rw;
+	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
-	bio->bi_rw		= bio_src->bi_rw;
+	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
-	bio->bi_rw |= REQ_SYNC;
+	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

+13 −13
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
		return false;

	return true;
@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio)
{
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(q, req, bio))
		return false;
@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
-	const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(q, req, bio))
		return false;
@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
{
	req->cmd_type = REQ_TYPE_FS;

-	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-	if (bio->bi_rw & REQ_RAHEAD)
+	req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)

static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
{
-	const bool sync = !!(bio->bi_rw & REQ_SYNC);
+	const bool sync = !!(bio->bi_opf & REQ_SYNC);
	struct blk_plug *plug;
	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
	struct request *req;
@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
		return BLK_QC_T_NONE;
	}

-	if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
@@ -1728,7 +1728,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
	/*
	 * Add in META/PRIO flags, if set, before we get to the IO scheduler
	 */
-	rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+	rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));

	/*
	 * Grab a free request. This is might sleep but can not fail.
@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
	printk(KERN_INFO "attempt to access beyond end of device\n");
	printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
			bdevname(bio->bi_bdev, b),
-			bio->bi_rw,
+			bio->bi_opf,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
}
@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
	 * drivers without flush support don't have to worry
	 * about them.
	 */
-	if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-		bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
		if (!nr_sectors) {
			err = 0;
			goto end_io;
@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
	 * one.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
-		if ((bio->bi_rw & ff) != ff)
+		if ((bio->bi_opf & ff) != ff)
			break;
		bytes += bio->bi_iter.bi_size;
	}
@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
	/* mixed attributes always follow the first bio */
	if (req->cmd_flags & REQ_MIXED_MERGE) {
		req->cmd_flags &= ~REQ_FAILFAST_MASK;
-		req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+		req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
	}

	/*