
Commit 0d945c1f authored by Christoph Hellwig, committed by Jens Axboe

block: remove the queue_lock indirection



With the legacy request path gone there is no good reason to keep
queue_lock as a pointer; we can always use the embedded lock now.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Fixed missed conversions and half-done edits in floppy and blk-cgroup.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6d469642
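
For context, the indirection this commit removes looks roughly like the sketch below (illustrative C only; the _before/_after struct names are stand-ins, not the real include/linux/blkdev.h declarations). struct request_queue used to carry a spinlock_t *queue_lock pointer next to an embedded __queue_lock that the pointer defaulted to, so a legacy driver could substitute a lock of its own; with the legacy request path gone, the queue can embed the lock directly and every caller simply takes its address:

/* Before: pointer indirection with an embedded default target. */
struct request_queue_before {
	spinlock_t	*queue_lock;	/* usually &__queue_lock, but a legacy
					 * driver could point it elsewhere */
	spinlock_t	__queue_lock;
};
/* callers: spin_lock_irq(q->queue_lock); */

/* After: one embedded lock, no indirection. */
struct request_queue_after {
	spinlock_t	queue_lock;
};
/* callers: spin_lock_irq(&q->queue_lock); */

This is exactly the mechanical change the hunks below make: initialization in blk_alloc_queue_node() collapses to a single spin_lock_init(&q->queue_lock), and every q->queue_lock user becomes &q->queue_lock.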
block/bfq-cgroup.c +1 −1
@@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
 
 	parent = bfqg_parent(bfqg);
 
-	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
 
 	if (unlikely(!parent))
 		return;
block/bfq-iosched.c +8 −8
@@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
 		unsigned long flags;
 		struct bfq_io_cq *icq;
 
-		spin_lock_irqsave(q->queue_lock, flags);
+		spin_lock_irqsave(&q->queue_lock, flags);
 		icq = icq_to_bic(ioc_lookup_icq(ioc, q));
-		spin_unlock_irqrestore(q->queue_lock, flags);
+		spin_unlock_irqrestore(&q->queue_lock, flags);
 
 		return icq;
 	}
@@ -4034,7 +4034,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	if (idle_timer_disabled)
 		/*
 		 * Since the idle timer has been disabled,
@@ -4053,7 +4053,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
 		bfqg_stats_set_start_empty_time(bfqg);
 		bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_dispatch_stats(struct request_queue *q,
@@ -4637,11 +4637,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
 	 * In addition, the following queue lock guarantees that
 	 * bfqq_group(bfqq) exists as well.
 	 */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
 	if (idle_timer_disabled)
 		bfqg_stats_update_idle_time(bfqq_group(bfqq));
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 #else
 static inline void bfq_update_insert_stats(struct request_queue *q,
@@ -5382,9 +5382,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 	}
 	eq->elevator_data = bfqd;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	q->elevator = eq;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	/*
 	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
block/blk-cgroup.c +31 −31
@@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 	if (blkg && blkg->q == q) {
 		if (update_hint) {
-			lockdep_assert_held(q->queue_lock);
+			lockdep_assert_held(&q->queue_lock);
 			rcu_assign_pointer(blkcg->blkg_hint, blkg);
 		}
 		return blkg;
@@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 	int i, ret;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/* blkg holds a reference to blkcg */
 	if (!css_tryget_online(&blkcg->css)) {
@@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	struct blkcg_gq *blkg;
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	blkg = __blkg_lookup(blkcg, q, true);
 	if (blkg)
@@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 	struct blkcg_gq *parent = blkg->parent;
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 	lockdep_assert_held(&blkcg->lock);
 
 	/* Something wrong if we are trying to remove same group twice */
@@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
 	struct blkcg_gq *blkg, *n;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 		struct blkcg *blkcg = blkg->blkcg;
 
@@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
 	}
 
 	q->root_blkg = NULL;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 }
 
 /*
@@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 	}
 	rcu_read_unlock();
 
@@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
 	struct cgroup_subsys_state *pos_css;
 	u64 sum = 0;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
 	struct blkg_rwstat sum = { };
 	int i;
 
-	lockdep_assert_held(blkg->q->queue_lock);
+	lockdep_assert_held(&blkg->q->queue_lock);
 
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 					  struct request_queue *q)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	if (!blkcg_policy_enabled(q, pol))
 		return ERR_PTR(-EOPNOTSUPP);
@@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx)
-	__acquires(rcu) __acquires(disk->queue->queue_lock)
+	__acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
 	struct gendisk *disk;
 	struct request_queue *q;
@@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	q = disk->queue;
 
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	blkg = blkg_lookup_check(blkcg, pol, q);
 	if (IS_ERR(blkg)) {
@@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
-		spin_unlock_irq(q->queue_lock);
+		spin_unlock_irq(&q->queue_lock);
 		rcu_read_unlock();
 
 		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		}
 
 		rcu_read_lock();
-		spin_lock_irq(q->queue_lock);
+		spin_lock_irq(&q->queue_lock);
 
 		blkg = blkg_lookup_check(pos, pol, q);
 		if (IS_ERR(blkg)) {
@@ -843,7 +843,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	return 0;
 
 fail_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 fail:
 	put_disk_and_module(disk);
@@ -868,9 +868,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
+	__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-	spin_unlock_irq(ctx->disk->queue->queue_lock);
+	spin_unlock_irq(&ctx->disk->queue->queue_lock);
 	rcu_read_unlock();
 	put_disk_and_module(ctx->disk);
 }
@@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		 */
 		off += scnprintf(buf+off, size-off, "%s ", dname);
 
-		spin_lock_irq(blkg->q->queue_lock);
+		spin_lock_irq(&blkg->q->queue_lock);
 
 		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
 					offsetof(struct blkcg_gq, stat_bytes));
@@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
 		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
 		dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
 
-		spin_unlock_irq(blkg->q->queue_lock);
+		spin_unlock_irq(&blkg->q->queue_lock);
 
 		if (rbytes || wbytes || rios || wios) {
 			has_stats = true;
@@ -1038,9 +1038,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
 						struct blkcg_gq, blkcg_node);
 		struct request_queue *q = blkg->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			blkg_destroy(blkg);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irq(&blkcg->lock);
 			cpu_relax();
@@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)
 
 	/* Make sure the root blkg exists. */
 	rcu_read_lock();
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
 	if (IS_ERR(blkg))
 		goto err_unlock;
 	q->root_blkg = blkg;
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
@@ -1185,7 +1185,7 @@ int blkcg_init_queue(struct request_queue *q)
 	blkg_destroy_all(q);
 	return ret;
 err_unlock:
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	rcu_read_unlock();
 	if (preloaded)
 		radix_tree_preload_end();
@@ -1200,7 +1200,7 @@ int blkcg_init_queue(struct request_queue *q)
  */
 void blkcg_drain_queue(struct request_queue *q)
 {
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * @q could be exiting and already have destroyed all blkgs as
@@ -1335,7 +1335,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
 		struct blkg_policy_data *pd;
@@ -1347,7 +1347,7 @@ int blkcg_activate_policy(struct request_queue *q,
 		if (!pd)
 			swap(pd, pd_prealloc);
 		if (!pd) {
-			spin_unlock_irq(q->queue_lock);
+			spin_unlock_irq(&q->queue_lock);
 			goto pd_prealloc;
 		}
 
@@ -1361,7 +1361,7 @@ int blkcg_activate_policy(struct request_queue *q,
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
@@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (q->mq_ops)
 		blk_mq_freeze_queue(q);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 
 	__clear_bit(pol->plid, q->blkcg_pols);
 
@@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		}
 	}
 
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	if (q->mq_ops)
 		blk_mq_unfreeze_queue(q);
block/blk-core.c +1 −9
@@ -327,8 +327,6 @@ void blk_exit_queue(struct request_queue *q)
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	spinlock_t *lock = q->queue_lock;
-
 	/* mark @q DYING, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	blk_set_queue_dying(q);
@@ -381,11 +379,6 @@ void blk_cleanup_queue(struct request_queue *q)
 
 	percpu_ref_exit(&q->q_usage_counter);
 
-	spin_lock_irq(lock);
-	if (q->queue_lock != &q->__queue_lock)
-		q->queue_lock = &q->__queue_lock;
-	spin_unlock_irq(lock);
-
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
@@ -524,8 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	mutex_init(&q->blk_trace_mutex);
 #endif
 	mutex_init(&q->sysfs_lock);
-	spin_lock_init(&q->__queue_lock);
-	q->queue_lock = &q->__queue_lock;
+	spin_lock_init(&q->queue_lock);
 
 	init_waitqueue_head(&q->mq_freeze_wq);
 
block/blk-ioc.c +7 −7
@@ -110,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
 						struct io_cq, ioc_node);
 		struct request_queue *q = icq->q;
 
-		if (spin_trylock(q->queue_lock)) {
+		if (spin_trylock(&q->queue_lock)) {
 			ioc_destroy_icq(icq);
-			spin_unlock(q->queue_lock);
+			spin_unlock(&q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
 			cpu_relax();
@@ -233,9 +233,9 @@ void ioc_clear_queue(struct request_queue *q)
 {
 	LIST_HEAD(icq_list);
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	list_splice_init(&q->icq_list, &icq_list);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 
 	__ioc_clear_queue(&icq_list);
 }
@@ -326,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
 {
 	struct io_cq *icq;
 
-	lockdep_assert_held(q->queue_lock);
+	lockdep_assert_held(&q->queue_lock);
 
 	/*
 	 * icq's are indexed from @ioc using radix tree and hint pointer,
@@ -385,7 +385,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	INIT_HLIST_NODE(&icq->ioc_node);
 
 	/* lock both q and ioc and try to link @icq */
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&q->queue_lock);
 	spin_lock(&ioc->lock);
 
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
@@ -401,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	}
 
 	spin_unlock(&ioc->lock);
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&q->queue_lock);
 	radix_tree_preload_end();
 	return icq;
 }