Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c4690127 authored by David S. Miller
Browse files

Merge branch 'smc-fixes'



Ursula Braun says:

====================
net/smc: fixes 2018-03-14

Here are SMC changes for the net-next tree.
The first patch enables SMC to work with mlx5-RoCE-devices.
Patches 2 and 3 deal with link group freeing.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5677629a 97cdbc42
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1477,6 +1477,7 @@ static void __exit smc_exit(void)
	spin_unlock_bh(&smc_lgr_list.lock);
	list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
		list_del_init(&lgr->list);
		cancel_delayed_work_sync(&lgr->free_work);
		smc_lgr_free(lgr); /* free link group */
	}
	static_branch_disable(&tcp_have_smc);
+15 −8
Original line number Diff line number Diff line
@@ -32,6 +32,17 @@

static u32 smc_lgr_num;			/* unique link group number */

/* (Re)arm the delayed work that frees this link group.
 * Re-queuing an already-pending work item simply pushes its deadline out.
 */
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	unsigned long delay;

	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	if (lgr->role == SMC_CLNT)
		delay = SMC_LGR_FREE_DELAY_CLNT;
	else
		delay = SMC_LGR_FREE_DELAY_SERV;
	mod_delayed_work(system_wq, &lgr->free_work, delay);
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
@@ -111,13 +122,7 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
	write_unlock_bh(&lgr->conns_lock);
	if (!reduced || lgr->conns_num)
		return;
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	mod_delayed_work(system_wq, &lgr->free_work,
			 lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
						 SMC_LGR_FREE_DELAY_SERV);
	smc_lgr_schedule_free_work(lgr);
}

static void smc_lgr_free_work(struct work_struct *work)
@@ -140,6 +145,7 @@ static void smc_lgr_free_work(struct work_struct *work)
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
	spin_unlock_bh(&smc_lgr_list.lock);
	if (!delayed_work_pending(&lgr->free_work))
		smc_lgr_free(lgr);
}

@@ -343,6 +349,7 @@ void smc_lgr_terminate(struct smc_link_group *lgr)
	}
	write_unlock_bh(&lgr->conns_lock);
	wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
	smc_lgr_schedule_free_work(lgr);
}

/* Determine vlan of internal TCP socket.
+9 −1
Original line number Diff line number Diff line
@@ -23,6 +23,8 @@
#include "smc_wr.h"
#include "smc.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT			7 /* 7: infinite */
@@ -438,9 +440,15 @@ int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
+0 −1
Original line number Diff line number Diff line
@@ -19,7 +19,6 @@
#include "smc.h"
#include "smc_core.h"

#define SMC_WR_MAX_CQE 32768	/* max. # of completion queue elements */
#define SMC_WR_BUF_CNT 16	/* # of ctrl buffers per link */

#define SMC_WR_TX_WAIT_FREE_SLOT_TIME	(10 * HZ)