
Commit 7f1d25b4 authored by Doug Ledford

Merge branches 'misc' and 'rxe' into k.o/for-4.8-1

MAINTAINERS +9 −0
@@ -7444,6 +7444,15 @@ W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlxsw/
 
+SOFT-ROCE DRIVER (rxe)
+M:	Moni Shoua <monis@mellanox.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+W:	https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+Q:	http://patchwork.kernel.org/project/linux-rdma/list/
+F:	drivers/infiniband/sw/rxe/
+F:	include/uapi/rdma/rdma_user_rxe.h
+
 MEMBARRIER SUPPORT
 M:	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 M:	"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
drivers/infiniband/Kconfig +1 −0
@@ -84,6 +84,7 @@ source "drivers/infiniband/ulp/iser/Kconfig"
 source "drivers/infiniband/ulp/isert/Kconfig"
 
 source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/sw/rxe/Kconfig"
 
 source "drivers/infiniband/hw/hfi1/Kconfig"
 
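For context (not part of this diff): the newly sourced file is expected to declare the soft-RoCE config symbol. In the upstream rxe tree that symbol is RDMA_RXE, roughly along these lines (a sketch, not the verbatim file):

config RDMA_RXE
	tristate "Software RDMA over Ethernet (RoCE) driver"
	depends on INET && PCI && INFINIBAND
	depends on NET_UDP_TUNNEL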
drivers/infiniband/core/cma.c +93 −7
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
 MODULE_LICENSE("Dual BSD/GPL");
 
 #define CMA_CM_RESPONSE_TIMEOUT 20
+#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
@@ -162,6 +163,14 @@ struct rdma_bind_list {
 	unsigned short		port;
 };
 
+struct class_port_info_context {
+	struct ib_class_port_info	*class_port_info;
+	struct ib_device		*device;
+	struct completion		done;
+	struct ib_sa_query		*sa_query;
+	u8				port_num;
+};
+
 static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
 			struct rdma_bind_list *bind_list, int snum)
 {
@@ -306,6 +315,7 @@ struct cma_multicast {
 	struct sockaddr_storage	addr;
 	struct kref		mcref;
 	bool			igmp_joined;
+	u8			join_state;
 };
 
 struct cma_work {
@@ -3754,10 +3764,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
 	}
 }
 
+static void cma_query_sa_classport_info_cb(int status,
+					   struct ib_class_port_info *rec,
+					   void *context)
+{
+	struct class_port_info_context *cb_ctx = context;
+
+	WARN_ON(!context);
+
+	if (status || !rec) {
+		pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
+			 cb_ctx->device->name, cb_ctx->port_num, status);
+		goto out;
+	}
+
+	memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
+
+out:
+	complete(&cb_ctx->done);
+}
+
+static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
+				       struct ib_class_port_info *class_port_info)
+{
+	struct class_port_info_context *cb_ctx;
+	int ret;
+
+	cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
+	if (!cb_ctx)
+		return -ENOMEM;
+
+	cb_ctx->device = device;
+	cb_ctx->class_port_info = class_port_info;
+	cb_ctx->port_num = port_num;
+	init_completion(&cb_ctx->done);
+
+	ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
+					     CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
+					     GFP_KERNEL, cma_query_sa_classport_info_cb,
+					     cb_ctx, &cb_ctx->sa_query);
+	if (ret < 0) {
+		pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
+		       device->name, port_num, ret);
+		goto out;
+	}
+
+	wait_for_completion(&cb_ctx->done);
+
+out:
+	kfree(cb_ctx);
+	return ret;
+}
+
 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 				 struct cma_multicast *mc)
 {
 	struct ib_sa_mcmember_rec rec;
+	struct ib_class_port_info class_port_info;
 	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
 	ib_sa_comp_mask comp_mask;
 	int ret;
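The pair of functions added above wraps the asynchronous SA ClassPortInfo query in a synchronous call: cma_query_sa_classport_info() embeds a struct completion in a heap-allocated context, fires ib_sa_classport_info_rec_query() with cma_query_sa_classport_info_cb() as the callback, and sleeps in wait_for_completion() until the callback signals it. Distilled to the bare idiom (an illustrative sketch only; none of these names are kernel APIs):

#include <linux/completion.h>
#include <linux/slab.h>

/* Sync-over-async: the waiter sleeps on a completion that the async
 * callback signals. All identifiers below are made up for illustration. */
struct sync_ctx {
	struct completion done;
	int result;
};

static void query_done_cb(int status, void *context)
{
	struct sync_ctx *ctx = context;

	ctx->result = status;
	complete(&ctx->done);		/* wake the sleeping caller */
}

static int query_sync(int (*submit)(void (*cb)(int, void *), void *arg))
{
	struct sync_ctx *ctx;
	int ret;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	init_completion(&ctx->done);
	ret = submit(query_done_cb, ctx);	/* kick off the async query */
	if (ret >= 0) {
		wait_for_completion(&ctx->done);
		ret = ctx->result;
	}

	kfree(ctx);
	return ret;
}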
@@ -3776,7 +3839,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
 	rec.qkey = cpu_to_be32(id_priv->qkey);
 	rdma_addr_get_sgid(dev_addr, &rec.port_gid);
 	rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
-	rec.join_state = 1;
+	rec.join_state = mc->join_state;
+
+	if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
+		ret = cma_query_sa_classport_info(id_priv->id.device,
+						  id_priv->id.port_num,
+						  &class_port_info);
+
+		if (ret)
+			return ret;
+
+		if (!(ib_get_cpi_capmask2(&class_port_info) &
+		      IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
+			pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
+				"RDMA CM: SM doesn't support Send Only Full Member option\n",
+				id_priv->id.device->name, id_priv->id.port_num);
+			return -EOPNOTSUPP;
+		}
+	}
 
 	comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
 		    IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3845,6 +3925,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
 	struct net_device *ndev = NULL;
 	enum ib_gid_type gid_type;
+	bool send_only;
+
+	send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
 
 	if (cma_zero_addr((struct sockaddr *)&mc->addr))
 		return -EINVAL;
@@ -3878,12 +3961,14 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
 		   rdma_start_port(id_priv->cma_dev->device)];
 	if (addr->sa_family == AF_INET) {
-		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
-			err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
-					    true);
-		if (!err) {
+		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
 			mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
-			mc->igmp_joined = true;
+			if (!send_only) {
+				err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+						    true);
+				if (!err)
+					mc->igmp_joined = true;
+			}
 		}
 	} else {
 		if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -3913,7 +3998,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 }
 
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-			void *context)
+			u8 join_state, void *context)
 {
 	struct rdma_id_private *id_priv;
 	struct cma_multicast *mc;
@@ -3932,6 +4017,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	mc->context = context;
 	mc->id_priv = id_priv;
 	mc->igmp_joined = false;
+	mc->join_state = join_state;
 	spin_lock(&id_priv->lock);
 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);
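With the extra join_state argument, rdma_join_multicast() callers now choose their membership type. A hypothetical ULP caller (not part of this diff) requesting a send-only full-member join might look like this; pre-existing callers pass BIT(FULLMEMBER_JOIN), which equals the previously hard-coded rec.join_state = 1:

#include <linux/bitops.h>
#include <rdma/rdma_cm.h>

/* Hypothetical caller: join as a send-only full member, so no receive
 * forwarding state needs to be programmed for this port. */
static int join_send_only(struct rdma_cm_id *id, struct sockaddr *addr,
			  void *context)
{
	return rdma_join_multicast(id, addr, BIT(SENDONLY_FULLMEMBER_JOIN),
				   context);
}

Note that cma_join_ib_multicast() above only honors such a request after confirming IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT in the SM's ClassPortInfo, and returns -EOPNOTSUPP otherwise.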
drivers/infiniband/core/iwpm_util.c +2 −1
@@ -37,6 +37,7 @@
 #define IWPM_MAPINFO_HASH_MASK	(IWPM_MAPINFO_HASH_SIZE - 1)
 #define IWPM_REMINFO_HASH_SIZE	64
 #define IWPM_REMINFO_HASH_MASK	(IWPM_REMINFO_HASH_SIZE - 1)
+#define IWPM_MSG_SIZE		512
 
 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
 {
 	struct sk_buff *skb = NULL;
 
-	skb = dev_alloc_skb(NLMSG_GOODSIZE);
+	skb = dev_alloc_skb(IWPM_MSG_SIZE);
 	if (!skb) {
 		pr_err("%s Unable to allocate skb\n", __func__);
 		goto create_nlmsg_exit;
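The allocation shrink is grounded in message sizes: NLMSG_GOODSIZE is defined in linux/netlink.h as SKB_WITH_OVERHEAD(PAGE_SIZE) (capped at 8KB), i.e. nearly a full page per message, while a port-mapper message is a netlink header plus a handful of small attributes. A minimal sketch of the new sizing (illustrative, not from the patch):

#include <linux/skbuff.h>

/* Port-mapper messages are on the order of 100-200 bytes, so a fixed
 * 512-byte skb is ample and avoids a near-page allocation per message. */
static struct sk_buff *iwpm_alloc_msg_buf(void)
{
	return dev_alloc_skb(IWPM_MSG_SIZE);	/* was NLMSG_GOODSIZE */
}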
drivers/infiniband/core/multicast.c +0 −12
@@ -93,18 +93,6 @@ enum {
 
 struct mcast_member;
 
-/*
- * There are 4 types of join states:
- * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
- */
-enum {
-	FULLMEMBER_JOIN,
-	NONMEMBER_JOIN,
-	SENDONLY_NONMEBER_JOIN,
-	SENDONLY_FULLMEMBER_JOIN,
-	NUM_JOIN_MEMBERSHIP_TYPES,
-};
-
 struct mcast_group {
 	struct ib_sa_mcmember_rec rec;
 	struct rb_node		node;
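This enum is removed rather than retired: the cma.c hunks above now reference BIT(SENDONLY_FULLMEMBER_JOIN), so elsewhere in this series the definition presumably moves to a shared header (likely include/rdma/ib_sa.h) where both multicast.c and the CM can see it. For reference, the four MCMemberRecord join states, used as bit positions in the join_state bitmask:

/* Bit positions for the MCMemberRecord join_state bitmask; shown as
 * removed here and presumably re-homed in a shared RDMA header. */
enum {
	FULLMEMBER_JOIN,
	NONMEMBER_JOIN,
	SENDONLY_NONMEBER_JOIN,		/* sic: spelling preserved from source */
	SENDONLY_FULLMEMBER_JOIN,
	NUM_JOIN_MEMBERSHIP_TYPES,
};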