
Commit b816d45f authored by Jens Axboe

Merge branch 'for-4.2/core' into for-4.2/drivers

We need the blkdev_reread_part() changes for drivers to adapt.
parents cddcd72b b04a5636
block/blk-core.c  +24 −17
@@ -1522,7 +1522,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count)
			    unsigned int *request_count,
			    struct request **same_queue_rq)
{
	struct blk_plug *plug;
	struct request *rq;
@@ -1542,8 +1543,16 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
	list_for_each_entry_reverse(rq, plug_list, queuelist) {
		int el_ret;

		if (rq->q == q)
		if (rq->q == q) {
			(*request_count)++;
			/*
			 * Only blk-mq multiple hardware queues case checks the
			 * rq in the same queue, there should be only one such
			 * rq in a queue
			 **/
			if (same_queue_rq)
				*same_queue_rq = rq;
		}

		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
			continue;
@@ -1608,7 +1617,7 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
	 * any locks.
	 */
	if (!blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
		return;

	spin_lock_irq(q->queue_lock);
@@ -1716,8 +1725,6 @@ static void handle_bad_sector(struct bio *bio)
			bio->bi_rw,
			(unsigned long long)bio_end_sector(bio),
			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));

	set_bit(BIO_EOF, &bio->bi_flags);
}

#ifdef CONFIG_FAIL_MAKE_REQUEST
@@ -3032,22 +3039,21 @@ void blk_start_plug(struct blk_plug *plug)
{
	struct task_struct *tsk = current;

	/*
	 * If this is a nested plug, don't actually assign it.
	 */
	if (tsk->plug)
		return;

	INIT_LIST_HEAD(&plug->list);
	INIT_LIST_HEAD(&plug->mq_list);
	INIT_LIST_HEAD(&plug->cb_list);

	/*
	 * If this is a nested plug, don't actually assign it. It will be
	 * flushed on its own.
	 */
	if (!tsk->plug) {
	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier
	 */
	tsk->plug = plug;
}
}
EXPORT_SYMBOL(blk_start_plug);

static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -3193,9 +3199,10 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)

void blk_finish_plug(struct blk_plug *plug)
{
	if (plug != current->plug)
		return;
	blk_flush_plug_list(plug, false);

	if (plug == current->plug)
	current->plug = NULL;
}
EXPORT_SYMBOL(blk_finish_plug);
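
The blk_start_plug() rework above moves the nested-plug check to the top of the function, but the observable contract is unchanged: only the outermost plug of a task is installed in current->plug, and only that plug is flushed. A minimal kernel-C sketch of that behaviour, with a hypothetical caller that nests plugs:

#include <linux/blkdev.h>

/*
 * Illustrative only. With the reworked blk_start_plug(), the inner
 * plug below is never installed (current->plug already points at the
 * outer one), and the inner blk_finish_plug() returns early because
 * &inner != current->plug. Everything queued in between is flushed
 * once, when the outermost plug is finished.
 */
static void submit_with_nested_plugs(void)
{
	struct blk_plug outer, inner;

	blk_start_plug(&outer);		/* installed: current->plug == &outer */

	blk_start_plug(&inner);		/* nested: early return, &inner stays unused */
	/* ... submit bios here; requests gather on the outer plug ... */
	blk_finish_plug(&inner);	/* no-op: &inner != current->plug */

	blk_finish_plug(&outer);	/* flushes the plug list built above */
}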
block/blk-mq.c  +89 −59
@@ -89,7 +89,8 @@ static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp)
			return -EBUSY;

		ret = wait_event_interruptible(q->mq_freeze_wq,
				!q->mq_freeze_depth || blk_queue_dying(q));
				!atomic_read(&q->mq_freeze_depth) ||
				blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
		if (ret)
@@ -112,13 +113,10 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)

void blk_mq_freeze_queue_start(struct request_queue *q)
{
	bool freeze;
	int freeze_depth;

	spin_lock_irq(q->queue_lock);
	freeze = !q->mq_freeze_depth++;
	spin_unlock_irq(q->queue_lock);

	if (freeze) {
	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
	if (freeze_depth == 1) {
		percpu_ref_kill(&q->mq_usage_counter);
		blk_mq_run_hw_queues(q, false);
	}
@@ -143,13 +141,11 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);

void blk_mq_unfreeze_queue(struct request_queue *q)
{
	bool wake;
	int freeze_depth;

	spin_lock_irq(q->queue_lock);
	wake = !--q->mq_freeze_depth;
	WARN_ON_ONCE(q->mq_freeze_depth < 0);
	spin_unlock_irq(q->queue_lock);
	if (wake) {
	freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
	WARN_ON_ONCE(freeze_depth < 0);
	if (!freeze_depth) {
		percpu_ref_reinit(&q->mq_usage_counter);
		wake_up_all(&q->mq_freeze_wq);
	}
@@ -1224,6 +1220,38 @@ static struct request *blk_mq_map_request(struct request_queue *q,
	return rq;
}

static int blk_mq_direct_issue_request(struct request *rq)
{
	int ret;
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q,
			rq->mq_ctx->cpu);
	struct blk_mq_queue_data bd = {
		.rq = rq,
		.list = NULL,
		.last = 1
	};

	/*
	 * For OK queue, we are done. For error, kill it. Any other
	 * error (busy), just add it to our list as we previously
	 * would have done
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK)
		return 0;
	else {
		__blk_mq_requeue_request(rq);

		if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			return 0;
		}
		return -1;
	}
}

/*
 * Multiple hardware queue variant. This will not use per-process plugs,
 * but will attempt to bypass the hctx queueing if we can go straight to
@@ -1235,6 +1263,9 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	struct blk_map_ctx data;
	struct request *rq;
	unsigned int request_count = 0;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;

	blk_queue_bounce(q, &bio);

@@ -1243,6 +1274,10 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
		return;
	}

	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
		return;

	rq = blk_mq_map_request(q, bio, &data);
	if (unlikely(!rq))
		return;
@@ -1253,38 +1288,42 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
		goto run_queue;
	}

	plug = current->plug;
	/*
	 * If the driver supports defer issued based on 'last', then
	 * queue it up like normal since we can potentially save some
	 * CPU this way.
	 */
	if (is_sync && !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct blk_mq_queue_data bd = {
			.rq = rq,
			.list = NULL,
			.last = 1
		};
		int ret;
	if (((plug && !blk_queue_nomerges(q)) || is_sync) &&
	    !(data.hctx->flags & BLK_MQ_F_DEFER_ISSUE)) {
		struct request *old_rq = NULL;

		blk_mq_bio_to_request(rq, bio);

		/*
		 * For OK queue, we are done. For error, kill it. Any other
		 * error (busy), just add it to our list as we previously
		 * would have done
		 * we do limited pluging. If bio can be merged, do merge.
		 * Otherwise the existing request in the plug list will be
		 * issued. So the plug list will have one request at most
		 */
		ret = q->mq_ops->queue_rq(data.hctx, &bd);
		if (ret == BLK_MQ_RQ_QUEUE_OK)
			goto done;
		else {
			__blk_mq_requeue_request(rq);

			if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
				rq->errors = -EIO;
				blk_mq_end_request(rq, rq->errors);
				goto done;
			}
		if (plug) {
			/*
			 * The plug list might get flushed before this. If that
			 * happens, same_queue_rq is invalid and plug list is empty
			 **/
			if (same_queue_rq && !list_empty(&plug->mq_list)) {
				old_rq = same_queue_rq;
				list_del_init(&old_rq->queuelist);
			}
			list_add_tail(&rq->queuelist, &plug->mq_list);
		} else /* is_sync */
			old_rq = rq;
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
			return;
		if (!blk_mq_direct_issue_request(old_rq))
			return;
		blk_mq_insert_request(old_rq, false, true, true);
		return;
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1297,7 +1336,6 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
run_queue:
		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
	}
done:
	blk_mq_put_ctx(data.ctx);
}

@@ -1309,16 +1347,11 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = rw_is_sync(bio->bi_rw);
	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
	unsigned int use_plug, request_count = 0;
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_map_ctx data;
	struct request *rq;

	/*
	 * If we have multiple hardware queues, just go directly to
	 * one of those for sync IO.
	 */
	use_plug = !is_flush_fua && !is_sync;

	blk_queue_bounce(q, &bio);

	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
@@ -1326,8 +1359,8 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
		return;
	}

	if (use_plug && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count))
	if (!is_flush_fua && !blk_queue_nomerges(q) &&
	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
		return;

	rq = blk_mq_map_request(q, bio, &data);
@@ -1345,9 +1378,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
	 * utilize that to temporarily store requests until the task is
	 * either done or scheduled away.
	 */
	if (use_plug) {
		struct blk_plug *plug = current->plug;

	plug = current->plug;
	if (plug) {
		blk_mq_bio_to_request(rq, bio);
		if (list_empty(&plug->mq_list))
@@ -1360,7 +1391,6 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
		blk_mq_put_ctx(data.ctx);
		return;
	}
	}

	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
		/*
@@ -2047,7 +2077,7 @@ void blk_mq_free_queue(struct request_queue *q)
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
	WARN_ON_ONCE(!q->mq_freeze_depth);
	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));

	blk_mq_sysfs_unregister(q);

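The mq_freeze_depth changes above drop the queue_lock round-trips and turn the freeze depth into an atomic_t, so only the 0 -> 1 transition starts a freeze and only the 1 -> 0 transition completes an unfreeze. A minimal sketch of that counting pattern, with hypothetical names (the real work in the diff is percpu_ref_kill()/percpu_ref_reinit() plus the wakeup):

#include <linux/atomic.h>
#include <linux/bug.h>

struct frozen_ctx {
	atomic_t freeze_depth;
};

static void ctx_freeze_start(struct frozen_ctx *c)
{
	/* Only the first freezer (depth 0 -> 1) does the real freeze work. */
	if (atomic_inc_return(&c->freeze_depth) == 1) {
		/* e.g. percpu_ref_kill() + kick the queues, as in the diff */
	}
}

static void ctx_unfreeze(struct frozen_ctx *c)
{
	int depth = atomic_dec_return(&c->freeze_depth);

	WARN_ON_ONCE(depth < 0);
	/* Only the last unfreezer (depth 1 -> 0) re-enables and wakes waiters. */
	if (!depth) {
		/* e.g. percpu_ref_reinit() + wake_up_all(), as in the diff */
	}
}
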
block/blk.h  +2 −1
@@ -78,7 +78,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count);
			    unsigned int *request_count,
			    struct request **same_queue_rq);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
block/bounce.c  +0 −3
@@ -128,9 +128,6 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
block/ioctl.c  +32 −5
@@ -150,21 +150,48 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
	}
}

static int blkdev_reread_part(struct block_device *bdev)
/*
 * This is an exported API for the block driver, and will not
 * acquire bd_mutex. This API should be used in case that
 * caller has held bd_mutex already.
 */
int __blkdev_reread_part(struct block_device *bdev)
{
	struct gendisk *disk = bdev->bd_disk;
	int res;

	if (!disk_part_scan_enabled(disk) || bdev != bdev->bd_contains)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!mutex_trylock(&bdev->bd_mutex))
		return -EBUSY;
	res = rescan_partitions(disk, bdev);

	lockdep_assert_held(&bdev->bd_mutex);

	return rescan_partitions(disk, bdev);
}
EXPORT_SYMBOL(__blkdev_reread_part);

/*
 * This is an exported API for the block driver, and will
 * try to acquire bd_mutex. If bd_mutex has been held already
 * in current context, please call __blkdev_reread_part().
 *
 * Make sure the held locks in current context aren't required
 * in open()/close() handler and I/O path for avoiding ABBA deadlock:
 * - bd_mutex is held before calling block driver's open/close
 *   handler
 * - reading partition table may submit I/O to the block device
 */
int blkdev_reread_part(struct block_device *bdev)
{
	int res;

	mutex_lock(&bdev->bd_mutex);
	res = __blkdev_reread_part(bdev);
	mutex_unlock(&bdev->bd_mutex);

	return res;
}
EXPORT_SYMBOL(blkdev_reread_part);

static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
			     uint64_t len, int secure)
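
The comment block added above defines the contract for the two exports: __blkdev_reread_part() is for callers that already hold bd_mutex, while blkdev_reread_part() takes and releases bd_mutex itself, and its callers must not hold locks needed by the open()/close() handlers or by the partition-scan I/O, or the ABBA deadlock described above can occur. A hedged driver-side sketch of how the pair is meant to be used (function names hypothetical; the prototypes live in a header that is not part of this diff):

#include <linux/fs.h>
#include <linux/lockdep.h>

static int example_rescan_locked(struct block_device *bdev)
{
	/* This path already holds bdev->bd_mutex, so use the __ variant. */
	lockdep_assert_held(&bdev->bd_mutex);
	return __blkdev_reread_part(bdev);
}

static int example_rescan(struct block_device *bdev)
{
	/* No bd_mutex (or conflicting lock) held here; the helper locks itself. */
	return blkdev_reread_part(bdev);
}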