Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8e908e99 authored by Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull two block layer fixes from Jens Axboe:
 "Two small patches that should make it into 3.19:

   - a fixup from me for NVMe, making the cq_vector a signed variable.
     Otherwise our -1 comparison fails, and commit 2b25d981 doesn't
     do what it was supposed to.

   - a fixup for the hotplug handling for blk-mq from Ming Lei, using
     the proper kobject referencing to ensure we release resources at
     the right time"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: fix hctx/ctx kobject use-after-free
  NVMe: cq_vector should be signed
parents 440e9960 76d697d1
Loading
Loading
Loading
Loading
+23 −2
Original line number Original line Diff line number Diff line
@@ -15,6 +15,26 @@


static void blk_mq_sysfs_release(struct kobject *kobj)
static void blk_mq_sysfs_release(struct kobject *kobj)
{
{
	struct request_queue *q;

	q = container_of(kobj, struct request_queue, mq_kobj);
	free_percpu(q->queue_ctx);
}

/*
 * kobject release callback for a per-CPU software context (blk_mq_ctx).
 *
 * Drops the reference on the owning queue's mq_kobj that was taken via
 * kobject_get(&q->mq_kobj) when the ctx kobject was registered in
 * blk_mq_register_hctx(), so the queue-level sysfs object (and the
 * queue_ctx percpu area freed in its own release) cannot go away while
 * any ctx kobject is still alive — this is the use-after-free fix.
 */
static void blk_mq_ctx_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx;

	/* Recover the containing ctx from its embedded kobject. */
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	kobject_put(&ctx->queue->mq_kobj);
}

/*
 * kobject release callback for a hardware context (blk_mq_hw_ctx).
 *
 * Frees the hctx itself here, at the point the last kobject reference is
 * dropped; the later hunk in this commit removes the corresponding
 * kfree(hctx) from blk_mq_free_hw_queues() so the memory is no longer
 * freed while sysfs may still hold a reference.
 */
static void blk_mq_hctx_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx;

	/* Recover the containing hctx from its embedded kobject. */
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	kfree(hctx);
}
}


struct blk_mq_ctx_sysfs_entry {
struct blk_mq_ctx_sysfs_entry {
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
	.release	= blk_mq_ctx_release,
};
};


static struct kobj_type blk_mq_hw_ktype = {
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
	.release	= blk_mq_hctx_release,
};
};


static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
		return ret;
		return ret;


	hctx_for_each_ctx(hctx, ctx, i) {
	hctx_for_each_ctx(hctx, ctx, i) {
		kobject_get(&q->mq_kobj);
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
		if (ret)
			break;
			break;
+1 −5
Original line number Original line Diff line number Diff line
@@ -1641,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;
	unsigned int i;


	queue_for_each_hw_ctx(q, hctx, i) {
	queue_for_each_hw_ctx(q, hctx, i)
		free_cpumask_var(hctx->cpumask);
		free_cpumask_var(hctx->cpumask);
		kfree(hctx);
	}
}
}


static int blk_mq_init_hctx(struct request_queue *q,
static int blk_mq_init_hctx(struct request_queue *q,
@@ -2002,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)


	percpu_ref_exit(&q->mq_usage_counter);
	percpu_ref_exit(&q->mq_usage_counter);


	free_percpu(q->queue_ctx);
	kfree(q->queue_hw_ctx);
	kfree(q->queue_hw_ctx);
	kfree(q->mq_map);
	kfree(q->mq_map);


	q->queue_ctx = NULL;
	q->queue_hw_ctx = NULL;
	q->queue_hw_ctx = NULL;
	q->mq_map = NULL;
	q->mq_map = NULL;


+1 −1
Original line number Original line Diff line number Diff line
@@ -106,7 +106,7 @@ struct nvme_queue {
	dma_addr_t cq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 __iomem *q_db;
	u16 q_depth;
	u16 q_depth;
	u16 cq_vector;
	s16 cq_vector;
	u16 sq_head;
	u16 sq_head;
	u16 sq_tail;
	u16 sq_tail;
	u16 cq_head;
	u16 cq_head;