Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd4ad577 authored by Ursula Braun, committed by David S. Miller
Browse files

smc: initialize IB transport incl. PD, MR, QP, CQ, event, WR



Prepare the link for RDMA transport:
Create a queue pair (QP) and move it into the state Ready-To-Receive (RTR).

Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f38ba179
Loading
Loading
Loading
Loading
+28 −6
Original line number Diff line number Diff line
@@ -339,9 +339,20 @@ static int smc_connect_rdma(struct smc_sock *smc)

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &aclc);
	/* tbd in follow-on patch: more steps to setup RDMA communication,
	 * create rmbs, map rmbs, rtoken_handling, modify_qp
	 */

	rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
	}

	rc = smc_clc_send_confirm(smc);
	if (rc)
@@ -638,9 +649,20 @@ static void smc_listen_work(struct work_struct *work)
	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &cclc);

	/* tbd in follow-on patch: more steps to setup RDMA communication,
	 * rtoken_handling, modify_qp
	 */
	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma;
	}

	/* tbd in follow-on patch: modify_qp, llc_confirm */
	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma;
		}
	}

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
+1 −0
Original line number Diff line number Diff line
@@ -43,6 +43,7 @@ struct smc_connection {
	atomic_t		peer_rmbe_space;/* remaining free bytes in peer
						 * rmbe
						 */
	int			rtoken_idx;	/* idx to peer RMB rkey/addr */

	struct smc_buf_desc	*sndbuf_desc;	/* send buffer descriptor */
	int			sndbuf_size;	/* sndbuf size <== sock wmem */
+7 −3
Original line number Diff line number Diff line
@@ -201,13 +201,15 @@ int smc_clc_send_confirm(struct smc_sock *smc)
	       SMC_GID_SIZE);
	memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
	       sizeof(link->smcibdev->mac));

	/* tbd in follow-on patch: fill in rmb-related values */

	hton24(cclc.qpn, link->roce_qp->qp_num);
	cclc.rmb_rkey =
		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
	cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
	cclc.rmbe_alert_token = htonl(conn->alert_token_local);
	cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
	cclc.rmbe_size = conn->rmbe_size_short;
	cclc.rmb_dma_addr =
		cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]);
	hton24(cclc.psn, link->psn_initial);

	memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
@@ -253,6 +255,8 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
	memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
	       sizeof(link->smcibdev->mac[link->ibport - 1]));
	hton24(aclc.qpn, link->roce_qp->qp_num);
	aclc.rmb_rkey =
		htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
	aclc.conn_idx = 1;			/* as long as 1 RMB = 1 RMBE */
	aclc.rmbe_alert_token = htonl(conn->alert_token_local);
	aclc.qp_mtu = link->path_mtu;
+76 −0
Original line number Diff line number Diff line
@@ -160,12 +160,23 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
	lnk->smcibdev = smcibdev;
	lnk->ibport = ibport;
	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
	if (!smcibdev->initialized)
		smc_ib_setup_per_ibdev(smcibdev);
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto free_lgr;
	init_waitqueue_head(&lnk->wr_tx_wait);
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;

	smc->conn.lgr = lgr;
	rwlock_init(&lgr->conns_lock);
@@ -174,6 +185,12 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
	spin_unlock_bh(&smc_lgr_list.lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
free_lgr:
	kfree(lgr);
out:
@@ -211,7 +228,10 @@ void smc_conn_free(struct smc_connection *conn)
/* Release all IB transport resources of a link, in the reverse order of
 * their setup in smc_lgr_create(): reset the queue pair, free the posted
 * work requests, destroy the QP, deallocate the protection domain, and
 * finally free the link's work-request memory.
 */
static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;	/* forget the peer's QP number */
	smc_ib_modify_qp_reset(lnk);	/* presumably moves the QP to RESET — matches helper name */
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}

@@ -223,6 +243,10 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i],
					 list) {
			list_del(&sndbuf_desc->list);
			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
					 smc_uncompress_bufsize(i),
					 sndbuf_desc, DMA_TO_DEVICE);
			kfree(sndbuf_desc->cpu_addr);
			kfree(sndbuf_desc);
		}
@@ -232,11 +256,16 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
static void smc_lgr_free_rmbs(struct smc_link_group *lgr)
{
	struct smc_buf_desc *rmb_desc, *bf_desc;
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i],
					 list) {
			list_del(&rmb_desc->list);
			smc_ib_buf_unmap(lnk->smcibdev,
					 smc_uncompress_bufsize(i),
					 rmb_desc, DMA_FROM_DEVICE);
			kfree(rmb_desc->cpu_addr);
			kfree(rmb_desc);
		}
@@ -550,6 +579,18 @@ int smc_rmb_create(struct smc_sock *smc)
			kfree(rmb_desc);
			continue; /* if mapping failed, try smaller one */
		}
		rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					     &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
		if (rc) {
			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
					 tmp_bufsize, rmb_desc,
					 DMA_FROM_DEVICE);
			kfree(rmb_desc->cpu_addr);
			kfree(rmb_desc);
			continue;
		}
		rmb_desc->used = 1;
		write_lock_bh(&lgr->rmbs_lock);
		list_add(&rmb_desc->list,
@@ -567,3 +608,38 @@ int smc_rmb_create(struct smc_sock *smc)
		return -ENOMEM;
	}
}

static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

/* Store the peer's RMB rkey/DMA-address pair received during the CLC
 * handshake. If the rkey is already registered in the link group, the
 * connection simply points at that existing slot; otherwise a free slot
 * is reserved and filled.
 * Returns 0 on success, or a negative slot-reservation error.
 */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	struct smc_link_group *lgr = conn->lgr;
	u32 peer_rkey = ntohl(clc->rmb_rkey);
	u64 peer_addr = be64_to_cpu(clc->rmb_dma_addr);
	int idx;

	/* rkey already known? reuse its slot */
	for (idx = 0; idx < SMC_RMBS_PER_LGR_MAX; idx++) {
		if (test_bit(idx, lgr->rtokens_used_mask) &&
		    lgr->rtokens[idx][SMC_SINGLE_LINK].rkey == peer_rkey) {
			conn->rtoken_idx = idx;
			return 0;
		}
	}
	conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = peer_rkey;
	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = peer_addr;
	return 0;
}
+18 −0
Original line number Diff line number Diff line
@@ -90,9 +90,18 @@ struct smc_buf_desc {
	u64			dma_addr[SMC_LINKS_PER_LGR_MAX];
						/* mapped address of buffer */
	void			*cpu_addr;	/* virtual address of buffer */
	struct ib_mr		*mr_rx[SMC_LINKS_PER_LGR_MAX];
						/* for rmb only:
						 * rkey provided to peer
						 */
	u32			used;		/* currently used / unused */
};

struct smc_rtoken {				/* address/key of remote RMB */
	u64			dma_addr;
	u32			rkey;
};

struct smc_link_group {
	struct list_head	list;
	enum smc_lgr_role	role;		/* client or server */
@@ -109,6 +118,13 @@ struct smc_link_group {
	rwlock_t		sndbufs_lock;	/* protects tx buffers */
	struct list_head	rmbs[SMC_RMBE_SIZES];	/* rx buffers */
	rwlock_t		rmbs_lock;	/* protects rx buffers */
	struct smc_rtoken	rtokens[SMC_RMBS_PER_LGR_MAX]
				       [SMC_LINKS_PER_LGR_MAX];
						/* remote addr/key pairs */
	unsigned long		rtokens_used_mask[BITS_TO_LONGS(
							SMC_RMBS_PER_LGR_MAX)];
						/* used rtoken elements */

	struct delayed_work	free_work;	/* delayed freeing of an lgr */
	bool			sync_err;	/* lgr no longer fits to peer */
};
@@ -153,5 +169,7 @@ void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
int smc_sndbuf_create(struct smc_sock *smc);
int smc_rmb_create(struct smc_sock *smc);
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc);

#endif
Loading