Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6b8f9159 authored by Linus Torvalds
Browse files

Merge tag 'for-linus-20190125' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes for this release. This contains:

   - Silence sparse rightfully complaining about non-static wbt
     functions (Bart)

   - Fixes for the zoned comments/ioctl documentation (Damien)

   - direct-io fix that's been lingering for a while (Ernesto)

   - cgroup writeback fix (Tejun)

   - Set of NVMe patches for nvme-rdma/tcp (Sagi, Hannes, Raju)

   - Block recursion tracking fix (Ming)

   - Fix debugfs command flag naming for a few flags (Jianchao)"

* tag 'for-linus-20190125' of git://git.kernel.dk/linux-block:
  block: Fix comment typo
  uapi: fix ioctl documentation
  blk-wbt: Declare local functions static
  blk-mq: fix the cmd_flag_name array
  nvme-multipath: drop optimization for static ANA group IDs
  nvmet-rdma: fix null dereference under heavy load
  nvme-rdma: rework queue maps handling
  nvme-tcp: fix timeout handler
  nvme-rdma: fix timeout handler
  writeback: synchronize sync(2) against cgroup writeback membership switches
  block: cover another queue enter recursion via BIO_QUEUE_ENTERED
  direct-io: allow direct writes to empty inodes
parents ba606975 8367de2c
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -1083,7 +1083,18 @@ blk_qc_t generic_make_request(struct bio *bio)
			/* Create a fresh bio_list for all subordinate requests */
			bio_list_on_stack[1] = bio_list_on_stack[0];
			bio_list_init(&bio_list_on_stack[0]);

			/*
			 * Since we're recursing into make_request here, ensure
			 * that we mark this bio as already having entered the queue.
			 * If not, and the queue is going away, we can get stuck
			 * forever on waiting for the queue reference to drop. But
			 * that will never happen, as we're already holding a
			 * reference to it.
			 */
			bio_set_flag(bio, BIO_QUEUE_ENTERED);
			ret = q->make_request_fn(q, bio);
			bio_clear_flag(bio, BIO_QUEUE_ENTERED);

			/* sort new bios into those for a lower level
			 * and those for the same level
+0 −10
Original line number Diff line number Diff line
@@ -272,16 +272,6 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
		/* there isn't chance to merge the splitted bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
+2 −1
Original line number Diff line number Diff line
@@ -308,8 +308,9 @@ static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

+2 −2
Original line number Diff line number Diff line
@@ -597,7 +597,7 @@ static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
static void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

@@ -617,7 +617,7 @@ void wbt_issue(struct rq_qos *rqos, struct request *rq)
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
static void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
+1 −2
Original line number Diff line number Diff line
@@ -545,7 +545,6 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	if (!(ctrl->anacap & (1 << 6)))
	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
Loading