Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4bb46608 authored by Doug Ledford
Browse files

Merge branch 'k.o/for-rc' into k.o/wip/dl-for-next



There is a 14 patch series waiting to come into for-next that has a
dependency on code submitted into this kernel's for-rc series.  So, merge
the for-rc branch into the current for-next in order to make the patch
series apply cleanly.

Signed-off-by: Doug Ledford <dledford@redhat.com>
parents f76a5c75 f4576587
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -305,16 +305,21 @@ void nldev_exit(void);
static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					  struct ib_pd *pd,
					  struct ib_qp_init_attr *attr,
					  struct ib_udata *udata)
					  struct ib_udata *udata,
					  struct ib_uobject *uobj)
{
	struct ib_qp *qp;

	if (!dev->create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;

	qp->device = dev;
	qp->pd = pd;
	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internaly by driver,
+28 −10
Original line number Diff line number Diff line
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
	 */
	uobj->context = context;
	uobj->type = type;
	atomic_set(&uobj->usecnt, 0);
	/*
	 * Allocated objects start out as write locked to deny any other
	 * syscalls from accessing them until they are committed. See
	 * rdma_alloc_commit_uobject
	 */
	atomic_set(&uobj->usecnt, -1);
	kref_init(&uobj->ref);

	return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
		goto free;
	}

	uverbs_uobject_get(uobj);
	/*
	 * The idr_find is guaranteed to return a pointer to something that
	 * isn't freed yet, or NULL, as the free after idr_remove goes through
	 * kfree_rcu(). However the object may still have been released and
	 * kfree() could be called at any time.
	 */
	if (!kref_get_unless_zero(&uobj->ref))
		uobj = ERR_PTR(-ENOENT);

free:
	rcu_read_unlock();
	return uobj;
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
	return ret;
}

static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
	if (exclusive)
		WARN_ON(atomic_read(&uobj->usecnt) > 0);
		WARN_ON(atomic_read(&uobj->usecnt) != -1);
	else
		WARN_ON(atomic_read(&uobj->usecnt) == -1);
		WARN_ON(atomic_read(&uobj->usecnt) <= 0);
#endif
}

@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	lockdep_check(uobj, true);
	assert_uverbs_usecnt(uobj, true);
	ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);

	up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
		WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
		return 0;
	}
	lockdep_check(uobject, true);
	assert_uverbs_usecnt(uobject, true);
	ret = uobject->type->type_class->remove_commit(uobject,
						       RDMA_REMOVE_DESTROY);
	if (ret)
		return ret;
		goto out;

	uobject->type = &null_obj_type;

out:
	up_read(&ucontext->cleanup_rwsem);
	return 0;
	return ret;
}

static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
		return ret;
	}

	/* matches atomic_set(-1) in alloc_uobj */
	assert_uverbs_usecnt(uobj, true);
	atomic_set(&uobj->usecnt, 0);

	uobj->type->type_class->alloc_commit(uobj);
	up_read(&uobj->context->cleanup_rwsem);

@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)

void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
	lockdep_check(uobj, exclusive);
	assert_uverbs_usecnt(uobj, exclusive);
	uobj->type->type_class->lookup_put(uobj, exclusive);
	/*
	 * In order to unlock an object, either decrease its usecnt for
+16 −7
Original line number Diff line number Diff line
@@ -7,7 +7,6 @@
#include <rdma/restrack.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>

void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
{
	enum rdma_restrack_type type = res->type;
	struct ib_device *dev;
	struct ib_xrcd *xrcd;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
		qp = container_of(res, struct ib_qp, res);
		dev = qp->device;
		break;
	case RDMA_RESTRACK_XRCD:
		xrcd = container_of(res, struct ib_xrcd, res);
		dev = xrcd->device;
		break;
	default:
		WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
		return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
	return dev;
}

/*
 * res_is_user - report whether a tracked resource was created from
 * user space.
 *
 * Decides by inspecting the uobject pointer of the structure embedding
 * @res: objects created through uverbs carry a non-NULL uobject (for
 * QPs this is set in _ib_create_qp() / create_qp() elsewhere in this
 * commit), while kernel-internal objects leave it NULL.  The caller
 * (rdma_restrack_add()) uses the result to decide whether to pin the
 * owning task or record a kernel name instead.
 *
 * An unknown resource type triggers a one-shot WARN and is treated as
 * kernel-owned (returns false).
 */
static bool res_is_user(struct rdma_restrack_entry *res)
{
	switch (res->type) {
	case RDMA_RESTRACK_PD:
		/* pointer used as boolean: non-NULL uobject => user object */
		return container_of(res, struct ib_pd, res)->uobject;
	case RDMA_RESTRACK_CQ:
		return container_of(res, struct ib_cq, res)->uobject;
	case RDMA_RESTRACK_QP:
		return container_of(res, struct ib_qp, res)->uobject;
	default:
		WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
		return false;
	}
}

void rdma_restrack_add(struct rdma_restrack_entry *res)
{
	struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
	if (!dev)
		return;

	if (!uaccess_kernel()) {
	if (res_is_user(res)) {
		get_task_struct(current);
		res->task = current;
		res->kern_name = NULL;
+40 −10
Original line number Diff line number Diff line
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	uobj_alloc_commit(&obj->uobject);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,

	uobj  = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
			       file->ucontext);
	if (IS_ERR(uobj)) {
		mutex_unlock(&file->device->xrcd_tree_mutex);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);
	}

	ret = uobj_remove_commit(uobj);
	return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	struct ib_uverbs_ex_create_cq_resp resp;
	struct ib_cq_init_attr attr = {};

	if (!ib_dev->create_cq)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd->comp_vector >= file->device->num_comp_vectors)
		return ERR_PTR(-EINVAL);

@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);

	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	ret = cb(file, obj, &resp, ucore, context);
	if (ret)
		goto err_cb;

	uobj_alloc_commit(&obj->uobject);
	cq->res.type = RDMA_RESTRACK_CQ;
	rdma_restrack_add(&cq->res);

	return obj;

err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
	if (cmd->qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = _ib_create_qp(device, pd, &attr, uhw);
		qp = _ib_create_qp(device, pd, &attr, uhw,
				   &obj->uevent.uobject);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	}
	} else {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = &obj->uevent.uobject;
	}

	obj->uevent.uobject.object = qp;

@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_AV) &&
	    !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
		ret = -EINVAL;
		goto release_qp;
	}

	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
	    (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
	    !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
		ret = -EINVAL;
		goto release_qp;
	}
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
		wq_init_attr.create_flags = cmd.create_flags;
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	if (!pd->device->create_wq) {
		err = -EOPNOTSUPP;
		goto err_put_cq;
	}
	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
	if (IS_ERR(wq)) {
		err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
		wq_attr.flags = cmd.flags;
		wq_attr.flags_mask = cmd.flags_mask;
	}
	if (!wq->device->modify_wq) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
out:
	uobj_put_obj_read(wq);
	return ret;
}
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,

	init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
	init_attr.ind_tbl = wqs;

	if (!ib_dev->create_rwq_ind_table) {
		err = -EOPNOTSUPP;
		goto err_uobj;
	}
	rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);

	if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
	struct ib_device_attr attr = {0};
	int err;

	if (!ib_dev->query_device)
		return -EOPNOTSUPP;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

+3 −0
Original line number Diff line number Diff line
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
			return 0;
	}

	if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
		return -EINVAL;

	spec = &attr_spec_bucket->attrs[attr_id];
	e = &elements[attr_id];
	e->uattr = uattr_ptr;
Loading