
Commit 6b66b2da authored by Robert Walsh, committed by Roland Dreier

IB/ipath: Don't corrupt pending mmap list when unmapped objects are freed



Fix the pending mmap code so it doesn't corrupt the list of pending
mmaps and crash the machine when pending mmaps are destroyed without
first being mapped.  Also, remove an unused variable, and use standard
kernel lists instead of our own homebrewed linked list implementation
to keep the pending mmap list.

Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 9ba6d552
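
The diffs below replace a homebrewed singly-linked pending list (ip->next chained off dev->pending_mmaps) with standard <linux/list.h> lists. The old code never unlinked an entry when the underlying CQ, QP, or SRQ was destroyed without being mmapped, so ipath_release_mmap_info() freed memory that was still on the list, leaving a dangling pointer behind. With list_head, ipath_release_mmap_info() unlinks under dev->pending_lock before freeing, and list_del_init() keeps a later list_empty() test valid. A minimal, self-contained userspace sketch of that idiom (illustration only, not part of the patch; the simplified helpers and struct names are stand-ins for the kernel's):

	/*
	 * Illustration only -- not from the patch.  Minimal re-implementation
	 * of the kernel's doubly-linked list primitives, showing why a node
	 * must unlink itself before it is freed.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void INIT_LIST_HEAD(struct list_head *h)
	{
		h->next = h->prev = h;
	}

	static void list_add(struct list_head *entry, struct list_head *head)
	{
		entry->next = head->next;
		entry->prev = head;
		head->next->prev = entry;
		head->next = entry;
	}

	/* Unlink and re-initialize so a later list_empty() check stays valid. */
	static void list_del_init(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		INIT_LIST_HEAD(entry);
	}

	/* Stand-in for struct ipath_mmap_info; the list node is the first
	 * member, so a cast substitutes for container_of() here. */
	struct mmap_info {
		struct list_head pending;
		int id;
	};

	int main(void)
	{
		struct list_head pending_mmaps;
		struct mmap_info *a = malloc(sizeof(*a));
		struct mmap_info *b = malloc(sizeof(*b));

		INIT_LIST_HEAD(&pending_mmaps);
		a->id = 1;
		b->id = 2;
		list_add(&a->pending, &pending_mmaps);
		list_add(&b->pending, &pending_mmaps);

		/*
		 * 'a' is destroyed without ever being mmapped.  The old ->next
		 * chain freed it while still linked, corrupting the list; the
		 * list_head version unlinks first.
		 */
		list_del_init(&a->pending);
		free(a);

		/* The pending list still walks cleanly. */
		for (struct list_head *p = pending_mmaps.next;
		     p != &pending_mmaps; p = p->next)
			printf("pending id=%d\n", ((struct mmap_info *)p)->id);

		list_del_init(&b->pending);
		free(b);
		return 0;
	}

The list_empty() guard in the resize/modify hunks below relies on exactly this: an entry removed with list_del_init(), or freshly initialized in ipath_create_mmap_info(), reports empty and can safely be re-added to dev->pending_mmaps.
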
drivers/infiniband/hw/ipath/ipath_cq.c  +22 −29
@@ -243,33 +243,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) wc;
 		int err;
+		u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
+		cq->ip = ipath_create_mmap_info(dev, s, context, wc);
+		if (!cq->ip) {
+			ret = ERR_PTR(-ENOMEM);
 			goto bail_wc;
 		}
 
-		/* Allocate info for ipath_mmap(). */
-		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-		if (!ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wc;
+		err = ib_copy_to_udata(udata, &cq->ip->offset,
+				       sizeof(cq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
 		}
-		cq->ip = ip;
-		ip->context = context;
-		ip->obj = wc;
-		kref_init(&ip->ref);
-		ip->mmap_cnt = 0;
-		ip->size = PAGE_ALIGN(sizeof(*wc) +
-				      sizeof(struct ib_wc) * entries);
-		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
-		spin_unlock_irq(&dev->pending_lock);
 	} else
 		cq->ip = NULL;
 
@@ -277,12 +265,18 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
 		spin_unlock(&dev->n_cqs_lock);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail_wc;
+		goto bail_ip;
 	}
 
 	dev->n_cqs_allocated++;
 	spin_unlock(&dev->n_cqs_lock);
 
+	if (cq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	/*
 	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
 	 * The number of entries should be >= the number requested or return
@@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
 	goto done;
 
+bail_ip:
+	kfree(cq->ip);
 bail_wc:
 	vfree(wc);
-
 bail_cq:
 	kfree(cq);
-
 done:
 	return ret;
 }
@@ -443,13 +437,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 	if (cq->ip) {
 		struct ipath_ibdev *dev = to_idev(ibcq->device);
 		struct ipath_mmap_info *ip = cq->ip;
+		u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;
 
-		ip->obj = wc;
-		ip->size = PAGE_ALIGN(sizeof(*wc) +
-				      sizeof(struct ib_wc) * cqe);
+		ipath_update_mmap_info(dev, ip, s, wc);
 		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
+		if (list_empty(&ip->pending_mmaps))
+			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
 		spin_unlock_irq(&dev->pending_lock);
 	}
 
drivers/infiniband/hw/ipath/ipath_mmap.c  +58 −6
@@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
 {
 	struct ipath_mmap_info *ip =
 		container_of(ref, struct ipath_mmap_info, ref);
+	struct ipath_ibdev *dev = to_idev(ip->context->device);
+
+	spin_lock_irq(&dev->pending_lock);
+	list_del(&ip->pending_mmaps);
+	spin_unlock_irq(&dev->pending_lock);
 
 	vfree(ip->obj);
 	kfree(ip);
@@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
 	struct ipath_mmap_info *ip = vma->vm_private_data;
 
 	kref_get(&ip->ref);
-	ip->mmap_cnt++;
 }
 
 static void ipath_vma_close(struct vm_area_struct *vma)
 {
 	struct ipath_mmap_info *ip = vma->vm_private_data;
 
-	ip->mmap_cnt--;
 	kref_put(&ip->ref, ipath_release_mmap_info);
 }
 
@@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct ipath_ibdev *dev = to_idev(context->device);
 	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 	unsigned long size = vma->vm_end - vma->vm_start;
-	struct ipath_mmap_info *ip, **pp;
+	struct ipath_mmap_info *ip, *pp;
 	int ret = -EINVAL;
 
 	/*
@@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	 * CQ, QP, or SRQ is soon followed by a call to mmap().
 	 */
 	spin_lock_irq(&dev->pending_lock);
-	for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
+	list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
+				 pending_mmaps) {
 		/* Only the creator is allowed to mmap the object */
-		if (context != ip->context || (void *) offset != ip->obj)
+		if (context != ip->context || (__u64) offset != ip->offset)
 			continue;
 		/* Don't allow a mmap larger than the object. */
 		if (size > ip->size)
 			break;
 
-		*pp = ip->next;
+		list_del_init(&ip->pending_mmaps);
 		spin_unlock_irq(&dev->pending_lock);
 
 		ret = remap_vmalloc_range(vma, ip->obj, 0);
@@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 done:
 	return ret;
 }
+
+/*
+ * Allocate information for ipath_mmap
+ */
+struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
+					       u32 size,
+					       struct ib_ucontext *context,
+					       void *obj) {
+	struct ipath_mmap_info *ip;
+
+	ip = kmalloc(sizeof *ip, GFP_KERNEL);
+	if (!ip)
+		goto bail;
+
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	INIT_LIST_HEAD(&ip->pending_mmaps);
+	ip->size = size;
+	ip->context = context;
+	ip->obj = obj;
+	kref_init(&ip->ref);
+
+bail:
+	return ip;
+}
+
+void ipath_update_mmap_info(struct ipath_ibdev *dev,
+			    struct ipath_mmap_info *ip,
+			    u32 size, void *obj) {
+	size = PAGE_ALIGN(size);
+
+	spin_lock_irq(&dev->mmap_offset_lock);
+	if (dev->mmap_offset == 0)
+		dev->mmap_offset = PAGE_SIZE;
+	ip->offset = dev->mmap_offset;
+	dev->mmap_offset += size;
+	spin_unlock_irq(&dev->mmap_offset_lock);
+
+	ip->size = size;
+	ip->obj = obj;
+}
drivers/infiniband/hw/ipath/ipath_qp.c  +30 −22
@@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) qp->r_rq.wq;
 		int err;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
-			goto bail_rwq;
-		}
-
-		if (qp->r_rq.wq) {
-			/* Allocate info for ipath_mmap(). */
-			ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-			if (!ip) {
+		if (!qp->r_rq.wq) {
+			__u64 offset = 0;
+
+			err = ib_copy_to_udata(udata, &offset,
+					       sizeof(offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_rwq;
+			}
+		} else {
+			u32 s = sizeof(struct ipath_rwq) +
+				qp->r_rq.size * sz;
+
+			qp->ip =
+			    ipath_create_mmap_info(dev, s,
+						   ibpd->uobject->context,
+						   qp->r_rq.wq);
+			if (!qp->ip) {
 				ret = ERR_PTR(-ENOMEM);
 				goto bail_rwq;
 			}
-			qp->ip = ip;
-			ip->context = ibpd->uobject->context;
-			ip->obj = qp->r_rq.wq;
-			kref_init(&ip->ref);
-			ip->mmap_cnt = 0;
-			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-					      qp->r_rq.size * sz);
-			spin_lock_irq(&dev->pending_lock);
-			ip->next = dev->pending_mmaps;
-			dev->pending_mmaps = ip;
-			spin_unlock_irq(&dev->pending_lock);
+
+			err = ib_copy_to_udata(udata, &(qp->ip->offset),
+					       sizeof(qp->ip->offset));
+			if (err) {
+				ret = ERR_PTR(err);
+				goto bail_ip;
+			}
 		}
 	}
 
@@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	dev->n_qps_allocated++;
 	spin_unlock(&dev->n_qps_lock);
 
+	if (qp->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	ret = &qp->ibqp;
 	goto bail;
 
drivers/infiniband/hw/ipath/ipath_srq.c  +26 −29
@@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	 * See ipath_mmap() for details.
 	 */
 	if (udata && udata->outlen >= sizeof(__u64)) {
-		struct ipath_mmap_info *ip;
-		__u64 offset = (__u64) srq->rq.wq;
 		int err;
+		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
 
-		err = ib_copy_to_udata(udata, &offset, sizeof(offset));
-		if (err) {
-			ret = ERR_PTR(err);
+		srq->ip =
+		    ipath_create_mmap_info(dev, s,
+					   ibpd->uobject->context,
+					   srq->rq.wq);
+		if (!srq->ip) {
+			ret = ERR_PTR(-ENOMEM);
 			goto bail_wq;
 		}
 
-		/* Allocate info for ipath_mmap(). */
-		ip = kmalloc(sizeof(*ip), GFP_KERNEL);
-		if (!ip) {
-			ret = ERR_PTR(-ENOMEM);
-			goto bail_wq;
+		err = ib_copy_to_udata(udata, &srq->ip->offset,
+				       sizeof(srq->ip->offset));
+		if (err) {
+			ret = ERR_PTR(err);
+			goto bail_ip;
 		}
-		srq->ip = ip;
-		ip->context = ibpd->uobject->context;
-		ip->obj = srq->rq.wq;
-		kref_init(&ip->ref);
-		ip->mmap_cnt = 0;
-		ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-				      srq->rq.size * sz);
-		spin_lock_irq(&dev->pending_lock);
-		ip->next = dev->pending_mmaps;
-		dev->pending_mmaps = ip;
-		spin_unlock_irq(&dev->pending_lock);
 	} else
 		srq->ip = NULL;
 
@@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
 		spin_unlock(&dev->n_srqs_lock);
 		ret = ERR_PTR(-ENOMEM);
-		goto bail_wq;
+		goto bail_ip;
 	}
 
 	dev->n_srqs_allocated++;
 	spin_unlock(&dev->n_srqs_lock);
 
+	if (srq->ip) {
+		spin_lock_irq(&dev->pending_lock);
+		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
+		spin_unlock_irq(&dev->pending_lock);
+	}
+
 	ret = &srq->ibsrq;
 	goto done;
 
+bail_ip:
+	kfree(srq->ip);
 bail_wq:
 	vfree(srq->rq.wq);
-
 bail_srq:
 	kfree(srq);
-
 done:
 	return ret;
 }
@@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		if (srq->ip) {
 			struct ipath_mmap_info *ip = srq->ip;
 			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
+			u32 s = sizeof(struct ipath_rwq) + size * sz;
 
-			ip->obj = wq;
-			ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
-					      size * sz);
+			ipath_update_mmap_info(dev, ip, s, wq);
 			spin_lock_irq(&dev->pending_lock);
-			ip->next = dev->pending_mmaps;
-			dev->pending_mmaps = ip;
+			if (list_empty(&ip->pending_mmaps))
+				list_add(&ip->pending_mmaps,
+					 &dev->pending_mmaps);
 			spin_unlock_irq(&dev->pending_lock);
 		}
 	} else if (attr_mask & IB_SRQ_LIMIT) {
drivers/infiniband/hw/ipath/ipath_verbs.c  +3 −0
@@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
 		ret = -ENOMEM;
 		goto err_lk;
 	}
+	INIT_LIST_HEAD(&idev->pending_mmaps);
 	spin_lock_init(&idev->pending_lock);
+	idev->mmap_offset = PAGE_SIZE;
+	spin_lock_init(&idev->mmap_offset_lock);
 	INIT_LIST_HEAD(&idev->pending[0]);
 	INIT_LIST_HEAD(&idev->pending[1]);
 	INIT_LIST_HEAD(&idev->pending[2]);
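
A note on the offset scheme introduced by ipath_create_mmap_info(): instead of handing userspace a kernel virtual address as the mmap token, the driver now allocates a unique, page-aligned per-device offset (dev->mmap_offset) and matches it against vma->vm_pgoff << PAGE_SHIFT in ipath_mmap(). A hypothetical userspace consumer might look like the following sketch (the helper and the cmd_fd name are illustrative, not from the patch):

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	/* Hypothetical helper: map a CQ/QP/SRQ buffer using the __u64 offset
	 * token the kernel wrote back via ib_copy_to_udata().  'cmd_fd' is
	 * assumed to be the uverbs device fd. */
	static void *map_ipath_object(int cmd_fd, uint64_t offset, size_t size)
	{
		return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    cmd_fd, (off_t) offset);
	}
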