Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8d78f0f2 authored by James Simmons, committed by Greg Kroah-Hartman
Browse files

staging: lustre: lnet: cleanup some of the > 80 line issues



Clean up some of the over-80-character lines reported by
checkpatch.

Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4c93630f
Loading
Loading
Loading
Loading
+21 −10
Original line number Diff line number Diff line
@@ -96,7 +96,8 @@ ksocknal_destroy_route(struct ksock_route *route)
}

static int
ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
		     lnet_process_id_t id)
{
	int cpt = lnet_cpt_of_nid(id.nid);
	struct ksock_net *net = ni->ni_data;
@@ -319,7 +320,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
}

static void
ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
ksocknal_associate_route_conn_locked(struct ksock_route *route,
				     struct ksock_conn *conn)
{
	struct ksock_peer *peer = route->ksnr_peer;
	int type = conn->ksnc_type;
@@ -821,7 +823,8 @@ ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
				if (k < peer->ksnp_n_passive_ips) /* using it already */
					continue;

				k = ksocknal_match_peerip(iface, peerips, n_peerips);
				k = ksocknal_match_peerip(iface, peerips,
							  n_peerips);
				xor = ip ^ peerips[k];
				this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;

@@ -1302,7 +1305,10 @@ ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,

	/* Take packets blocking for this connection. */
	list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
		if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
		int match = conn->ksnc_proto->pro_match_tx(conn, tx,
							   tx->tx_nonblk);

		if (match == SOCKNAL_MATCH_NO)
			continue;

		list_del(&tx->tx_list);
@@ -1786,7 +1792,8 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
			      (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
				continue;

			count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
			count += ksocknal_close_peer_conns_locked(peer, ipaddr,
								  0);
		}
	}

@@ -2026,7 +2033,10 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
		}

		rc = 0;
		/* NB only new connections will pay attention to the new interface! */
		/*
		 * NB only new connections will pay attention to the
		 * new interface!
		 */
	}

	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2200,8 +2210,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
		int txmem;
		int rxmem;
		int nagle;
		struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
		struct ksock_conn *conn;

		conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
		if (!conn)
			return -ENOENT;

+26 −14
Original line number Diff line number Diff line
@@ -620,7 +620,8 @@ ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
}

struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
			  int nonblk)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
@@ -630,10 +631,12 @@ ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonb
	int fnob = 0;

	list_for_each(tmp, &peer->ksnp_conns) {
		struct ksock_conn *c  = list_entry(tmp, struct ksock_conn, ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
		struct ksock_conn *c;
		int nob, rc;

		c = list_entry(tmp, struct ksock_conn, ksnc_list);
		nob = atomic_read(&c->ksnc_tx_nob) +
		      c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto &&
@@ -752,9 +755,9 @@ ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
		LASSERT(msg->ksm_zc_cookies[1]);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);

		/* ZC ACK piggybacked on ztx release tx later */
		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx release tx later */

			ztx = tx;
	} else {
		/*
		 * It's a normal packet - can it piggback a noop zc-ack that
@@ -796,7 +799,8 @@ ksocknal_find_connectable_route_locked(struct ksock_peer *peer)

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)      /* connections being established */
		/* connections being established */
		if (route->ksnr_scheduled)
			continue;

		/* all route types connected ? */
@@ -1514,7 +1518,10 @@ int ksocknal_scheduler(void *arg)
			rc = ksocknal_process_transmit(conn, tx);

			if (rc == -ENOMEM || rc == -EAGAIN) {
				/* Incomplete send: replace tx on HEAD of tx_queue */
				/*
				 * Incomplete send: replace tx on HEAD of
				 * tx_queue
				 */
				spin_lock_bh(&sched->kss_lock);
				list_add(&tx->tx_list, &conn->ksnc_tx_queue);
			} else {
@@ -1724,7 +1731,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
	timeout = active ? *ksocknal_tunables.ksnd_timeout :
			    lnet_acceptor_timeout();

	rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
	rc = lnet_sock_read(sock, &hello->kshm_magic,
			    sizeof(hello->kshm_magic), timeout);
	if (rc) {
		CERROR("Error %d reading HELLO from %pI4h\n",
		       rc, &conn->ksnc_ipaddr);
@@ -1798,7 +1806,8 @@ ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
	    conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
		/* Userspace NAL assigns peer process ID from socket */
		recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
		recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
					 conn->ksnc_ipaddr);
	} else {
		recv_id.nid = hello->kshm_src_nid;
		recv_id.pid = hello->kshm_src_pid;
@@ -1882,7 +1891,8 @@ ksocknal_connect(struct ksock_route *route)
		if (peer->ksnp_accepting > 0) {
			CDEBUG(D_NET,
			       "peer %s(%d) already connecting to me, retry later.\n",
			       libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
			       libcfs_nid2str(peer->ksnp_id.nid),
			       peer->ksnp_accepting);
			retry_later = 1;
		}

@@ -2241,7 +2251,8 @@ ksocknal_connd(void *arg)

		/* Nothing to do for 'timeout'  */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
		add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
					 &wait);
		spin_unlock_bh(connd_lock);

		nloops = 0;
@@ -2371,7 +2382,8 @@ ksocknal_send_keepalive_locked(struct ksock_peer *peer)
	struct ksock_conn *conn;
	struct ksock_tx *tx;

	if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
	/* last_alive will be updated by create_conn */
	if (list_empty(&peer->ksnp_conns))
		return 0;

	if (peer->ksnp_proto != &ksocknal_protocol_v3x)
+4 −2
Original line number Diff line number Diff line
@@ -202,7 +202,8 @@ ksocknal_lib_recv_iov(struct ksock_conn *conn)
				fragnob = sum;

			conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
							   iov[i].iov_base, fragnob);
							   iov[i].iov_base,
							   fragnob);
		}
		conn->ksnc_msg.ksm_csum = saved_csum;
	}
@@ -291,7 +292,8 @@ ksocknal_lib_csum_tx(struct ksock_tx *tx)
}

int
ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
			       int *rxmem, int *nagle)
{
	struct socket *sock = conn->ksnc_sock;
	int len;
+20 −6
Original line number Diff line number Diff line
@@ -194,7 +194,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
	}

	if (!tx->tx_msg.ksm_zc_cookies[0]) {
		/* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
		/*
		 * NOOP tx has only one ZC-ACK cookie,
		 * can carry at least one more
		 */
		if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
			tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
			tx->tx_msg.ksm_zc_cookies[1] = cookie;
@@ -203,7 +206,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
		}

		if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
			/* not likely to carry more ACKs, skip it to simplify logic */
			/*
			 * not likely to carry more ACKs, skip it
			 * to simplify logic
			 */
			ksocknal_next_tx_carrier(conn);
		}

@@ -237,7 +243,10 @@ ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
		}

	} else {
		/* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
		/*
		 * ksm_zc_cookies[0] < ksm_zc_cookies[1],
		 * it is range of cookies
		 */
		if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
		    cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
			CWARN("%s: duplicated ZC cookie: %llu\n",
@@ -425,7 +434,8 @@ ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
				 tx_zc_list) {
		__u64 c = tx->tx_msg.ksm_zc_cookies[0];

		if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
		if (c == cookie1 || c == cookie2 ||
		    (cookie1 < c && c < cookie2)) {
			tx->tx_msg.ksm_zc_cookies[0] = 0;
			list_del(&tx->tx_zc_list);
			list_add(&tx->tx_zc_list, &zlist);
@@ -639,7 +649,8 @@ ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
}

static int
ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
		       int timeout)
{
	struct socket *sock = conn->ksnc_sock;
	int rc;
@@ -737,7 +748,10 @@ ksocknal_pack_msg_v2(struct ksock_tx *tx)
		tx->tx_nob = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
		tx->tx_resid = offsetof(ksock_msg_t,  ksm_u.lnetmsg.ksnm_hdr);
	}
	/* Don't checksum before start sending, because packet can be piggybacked with ACK */
	/*
	 * Don't checksum before start sending, because packet can be
	 * piggybacked with ACK
	 */
}

static void
+6 −4
Original line number Diff line number Diff line
@@ -87,7 +87,8 @@ lstcon_rpc_done(struct srpc_client_rpc *rpc)

static int
lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
		int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
		int bulk_npg, int bulk_len, int embedded,
		struct lstcon_rpc *crpc)
{
	crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
				       feats, bulk_npg, bulk_len,
@@ -778,7 +779,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
}

static int
lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
		       struct srpc_test_reqst *req)
{
	struct test_bulk_req *brq = &req->tsr_u.bulk_v0;

Loading