Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 395069fc authored by Chuck Lever, committed by Anna Schumaker
Browse files

xprtrdma: Add trace points for calls to transport switch methods



Name them "trace_xprtrdma_op_*" so they can be easily enabled as a
group. No trace point is added where the generic layer already has
observability.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent ba217ec6
Loading
Loading
Loading
Loading
+6 −4
Original line number Diff line number Diff line
@@ -381,11 +381,13 @@ TRACE_EVENT(xprtrdma_disconnect,
DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
DEFINE_RXPRT_EVENT(xprtrdma_op_connect);

TRACE_EVENT(xprtrdma_qp_event,
	TP_PROTO(
@@ -834,7 +836,7 @@ TRACE_EVENT(xprtrdma_decode_seg,
 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
 **/

TRACE_EVENT(xprtrdma_allocate,
TRACE_EVENT(xprtrdma_op_allocate,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
@@ -864,7 +866,7 @@ TRACE_EVENT(xprtrdma_allocate,
	)
);

TRACE_EVENT(xprtrdma_rpc_done,
TRACE_EVENT(xprtrdma_op_free,
	TP_PROTO(
		const struct rpc_task *task,
		const struct rpcrdma_req *req
+11 −7
Original line number Diff line number Diff line
@@ -268,7 +268,7 @@ xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_inject_dsc(r_xprt);
	trace_xprtrdma_op_inject_dsc(r_xprt);
	rdma_disconnect(r_xprt->rx_ia.ri_id);
}

@@ -284,7 +284,7 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_destroy(r_xprt);
	trace_xprtrdma_op_destroy(r_xprt);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

@@ -418,7 +418,7 @@ xprt_setup_rdma(struct xprt_create *args)
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	trace_xprtrdma_destroy(new_xprt);
	trace_xprtrdma_op_destroy(new_xprt);
	xprt_rdma_free_addresses(xprt);
	xprt_free(xprt);
	return ERR_PTR(rc);
@@ -428,7 +428,8 @@ xprt_setup_rdma(struct xprt_create *args)
 * xprt_rdma_close - close a transport connection
 * @xprt: transport context
 *
 * Called during transport shutdown, reconnect, or device removal.
 * Called during autoclose or device removal.
 *
 * Caller holds @xprt's send lock to prevent activity on this
 * transport while the connection is torn down.
 */
@@ -440,6 +441,8 @@ void xprt_rdma_close(struct rpc_xprt *xprt)

	might_sleep();

	trace_xprtrdma_op_close(r_xprt);

	/* Prevent marshaling and sending of new requests */
	xprt_clear_connected(xprt);

@@ -525,6 +528,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	trace_xprtrdma_op_connect(r_xprt);
	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rx_connect_worker,
@@ -659,11 +663,11 @@ xprt_rdma_allocate(struct rpc_task *task)

	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	rqst->rq_rbuffer = req->rl_recvbuf->rg_base;
	trace_xprtrdma_allocate(task, req);
	trace_xprtrdma_op_allocate(task, req);
	return 0;

out_fail:
	trace_xprtrdma_allocate(task, NULL);
	trace_xprtrdma_op_allocate(task, NULL);
	return -ENOMEM;
}

@@ -682,7 +686,7 @@ xprt_rdma_free(struct rpc_task *task)

	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
		rpcrdma_release_rqst(r_xprt, req);
	trace_xprtrdma_rpc_done(task, req);
	trace_xprtrdma_op_free(task, req);
}

/**