Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dadfcdab authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/ptlrpc: Adjust comments to better conform to coding style



This patch fixes "Block comments use a trailing */ on a separate line"
warnings reported by checkpatch.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 30aa9c52
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -72,7 +72,8 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
	 * returned and may be compared against out object.
	 */
	/* In the function below, .hs_keycmp resolves to
	 * conn_keycmp() */
	 * conn_keycmp()
	 */
	/* coverity[overrun-buffer-val] */
	conn2 = cfs_hash_findadd_unique(conn_hash, &peer, &conn->c_hash);
	if (conn != conn2) {
+34 −17
Original line number Diff line number Diff line
@@ -71,7 +71,8 @@ void request_out_callback(lnet_event_t *ev)
	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {

		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c does currently...  */
		 * like failing sends in client.c does currently...
		 */

		req->rq_net_err = 1;
		ptlrpc_client_wake_req(req);
@@ -95,7 +96,8 @@ void reply_in_callback(lnet_event_t *ev)
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	   for adaptive timeouts' early reply. */
	 * for adaptive timeouts' early reply.
	 */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);

	spin_lock(&req->rq_lock);
@@ -151,7 +153,8 @@ void reply_in_callback(lnet_event_t *ev)
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		   so we must unlink in ptlrpc_unregister_reply */
		 * so we must unlink in ptlrpc_unregister_reply
		 */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
@@ -162,7 +165,8 @@ void reply_in_callback(lnet_event_t *ev)

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref */
	 * since we don't have our own ref
	 */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}
@@ -213,7 +217,8 @@ void client_bulk_callback(lnet_event_t *ev)
		desc->bd_failure = 1;

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * otherwise */
	 * otherwise
	 */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

@@ -250,7 +255,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
	__u64 new_seq;

	/* set sequence ID for request and add it to history list,
	 * it must be called with hold svcpt::scp_lock */
	 * it must be called with hold svcpt::scp_lock
	 */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
@@ -258,7 +264,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window */
		 * we just jumped into a new time window
		 */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
@@ -266,7 +273,8 @@ static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
		 * however, it's possible that we used up all bits for
		 * sequence and jumped into the next usec bucket (future time),
		 * then we hope there will be less RPCs per bucket at some
		 * point, and sequence will catch up again */
		 * point, and sequence will catch up again
		 */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}
@@ -302,7 +310,8 @@ void request_in_callback(lnet_event_t *ev)
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context. */
		 * context.
		 */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
@@ -322,7 +331,8 @@ void request_in_callback(lnet_event_t *ev)

	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive. */
	 * size to non-zero if this was a successful receive.
	 */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
@@ -352,7 +362,8 @@ void request_in_callback(lnet_event_t *ev)
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy */
		 * drop incoming reqs since we set the portal lazy
		 */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
@@ -369,7 +380,8 @@ void request_in_callback(lnet_event_t *ev)
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now... */
	 * has been queued and we unlock, so do the wake now...
	 */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
@@ -390,7 +402,8 @@ void reply_out_callback(lnet_event_t *ev)

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing so I drop the
		 * net's ref on 'rs' */
		 * net's ref on 'rs'
		 */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
@@ -400,7 +413,8 @@ void reply_out_callback(lnet_event_t *ev)

	if (ev->unlinked) {
		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it */
		 * until ptlrpc_handle_rs() is done with it
		 */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

@@ -483,7 +497,8 @@ static void ptlrpc_ni_fini(void)
	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies */
	 * replies
	 */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
@@ -533,11 +548,13 @@ static int ptlrpc_ni_init(void)
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on... */
	 * different depending on...
	 */
	/* kernel LNet calls our master callback when there are new event,
	 * because we are guaranteed to get every event via callback,
	 * so we just set EQ size to 0 to avoid overhead of serializing
	 * enqueue/dequeue operations in LNet. */
	 * enqueue/dequeue operations in LNet.
	 */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;
+72 −42
Original line number Diff line number Diff line
@@ -112,7 +112,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
 * CLOSED. I would rather refcount the import and free it after
 * disconnection like we do with exports. To do that, the client_obd
 * will need to save the peer info somewhere other than in the import,
 * though. */
 * though.
 */
int ptlrpc_init_import(struct obd_import *imp)
{
	spin_lock(&imp->imp_lock);
@@ -282,11 +283,13 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
	/* Wait forever until inflight == 0. We really can't do it another
	 * way because in some cases we need to wait for very long reply
	 * unlink. We can't do anything before that because there is really
	 * no guarantee that some rdma transfer is not in progress right now. */
	 * no guarantee that some rdma transfer is not in progress right now.
	 */
	do {
		/* Calculate max timeout for waiting on rpcs to error
		 * out. Use obd_timeout if calculated value is smaller
		 * than it. */
		 * than it.
		 */
		if (!OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
			timeout = ptlrpc_inflight_timeout(imp);
			timeout += timeout / 3;
@@ -304,7 +307,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)

		/* Wait for all requests to error out and call completion
		 * callbacks. Cap it at obd_timeout -- these should all
		 * have been locally cancelled by ptlrpc_abort_inflight. */
		 * have been locally cancelled by ptlrpc_abort_inflight.
		 */
		lwi = LWI_TIMEOUT_INTERVAL(
			cfs_timeout_cap(cfs_time_seconds(timeout)),
			(timeout > 1)?cfs_time_seconds(1):cfs_time_seconds(1)/2,
@@ -328,13 +332,15 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
				 * maybe waiting for long reply unlink in
				 * sluggish nets). Let's check this. If there
				 * is no inflight and unregistering != 0, this
				 * is bug. */
				 * is bug.
				 */
				LASSERTF(count == 0, "Some RPCs are still unregistering: %d\n",
					 count);

				/* Let's save one loop as soon as inflight have
				 * dropped to zero. No new inflights possible at
				 * this point. */
				 * this point.
				 */
				rc = 0;
			} else {
				list_for_each_safe(tmp, n,
@@ -501,7 +507,8 @@ static int import_select_connection(struct obd_import *imp)
		       conn->oic_last_attempt);

		/* If we have not tried this connection since
		   the last successful attempt, go with this one */
		 * the last successful attempt, go with this one
		 */
		if ((conn->oic_last_attempt == 0) ||
		    cfs_time_beforeq_64(conn->oic_last_attempt,
				       imp->imp_last_success_conn)) {
@@ -511,8 +518,9 @@ static int import_select_connection(struct obd_import *imp)
		}

		/* If all of the connections have already been tried
		   since the last successful connection; just choose the
		   least recently used */
		 * since the last successful connection; just choose the
		 * least recently used
		 */
		if (!imp_conn)
			imp_conn = conn;
		else if (cfs_time_before_64(conn->oic_last_attempt,
@@ -529,10 +537,11 @@ static int import_select_connection(struct obd_import *imp)
	LASSERT(imp_conn->oic_conn);

	/* If we've tried everything, and we're back to the beginning of the
	   list, increase our timeout and try again. It will be reset when
	   we do finally connect. (FIXME: really we should wait for all network
	   state associated with the last connection attempt to drain before
	   trying to reconnect on it.) */
	 * list, increase our timeout and try again. It will be reset when
	 * we do finally connect. (FIXME: really we should wait for all network
	 * state associated with the last connection attempt to drain before
	 * trying to reconnect on it.)
	 */
	if (tried_all && (imp->imp_conn_list.next == &imp_conn->oic_item)) {
		struct adaptive_timeout *at = &imp->imp_at.iat_net_latency;

@@ -589,7 +598,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno)
	struct list_head *tmp;

	/* The requests in committed_list always have smaller transnos than
	 * the requests in replay_list */
	 * the requests in replay_list
	 */
	if (!list_empty(&imp->imp_committed_list)) {
		tmp = imp->imp_committed_list.next;
		req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
@@ -673,7 +683,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
		goto out;

	/* Reset connect flags to the originally requested flags, in case
	 * the server is updated on-the-fly we will get the new features. */
	 * the server is updated on-the-fly we will get the new features.
	 */
	imp->imp_connect_data.ocd_connect_flags = imp->imp_connect_flags_orig;
	/* Reset ocd_version each time so the server knows the exact versions */
	imp->imp_connect_data.ocd_version = LUSTRE_VERSION_CODE;
@@ -699,7 +710,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
	}

	/* Report the rpc service time to the server so that it knows how long
	 * to wait for clients to join recovery */
	 * to wait for clients to join recovery
	 */
	lustre_msg_set_service_time(request->rq_reqmsg,
				    at_timeout2est(request->rq_timeout));

@@ -707,7 +719,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
	 * import_select_connection will increase the net latency on
	 * repeated reconnect attempts to cover slow networks.
	 * We override/ignore the server rpc completion estimate here,
	 * which may be large if this is a reconnect attempt */
	 * which may be large if this is a reconnect attempt
	 */
	request->rq_timeout = INITIAL_CONNECT_TIMEOUT;
	lustre_msg_set_timeout(request->rq_reqmsg, request->rq_timeout);

@@ -798,7 +811,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,

	if (rc) {
		/* if this reconnect to busy export - not need select new target
		 * for connecting*/
		 * for connecting
		 */
		imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
		spin_unlock(&imp->imp_lock);
		ptlrpc_maybe_ping_import_soon(imp);
@@ -850,7 +864,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,

	if (!exp) {
		/* This could happen if export is cleaned during the
		   connect attempt */
		 * connect attempt
		 */
		CERROR("%s: missing export after connect\n",
		       imp->imp_obd->obd_name);
		rc = -ENODEV;
@@ -876,14 +891,16 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		}

		/* if applies, adjust the imp->imp_msg_magic here
		 * according to reply flags */
		 * according to reply flags
		 */

		imp->imp_remote_handle =
				*lustre_msg_get_handle(request->rq_repmsg);

		/* Initial connects are allowed for clients with non-random
		 * uuids when servers are in recovery.  Simply signal the
		 * servers replay is complete and wait in REPLAY_WAIT. */
		 * servers replay is complete and wait in REPLAY_WAIT.
		 */
		if (msg_flags & MSG_CONNECT_RECOVERING) {
			CDEBUG(D_HA, "connect to %s during recovery\n",
			       obd2cli_tgt(imp->imp_obd));
@@ -922,7 +939,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
			 * already erased all of our state because of previous
			 * eviction. If it is in recovery - we are safe to
			 * participate since we can reestablish all of our state
			 * with server again */
			 * with server again
			 */
			if ((msg_flags & MSG_CONNECT_RECOVERING)) {
				CDEBUG(level, "%s@%s changed server handle from %#llx to %#llx but is still in recovery\n",
				       obd2cli_tgt(imp->imp_obd),
@@ -1038,7 +1056,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		     ocd->ocd_version < LUSTRE_VERSION_CODE -
					LUSTRE_VERSION_OFFSET_WARN)) {
			/* Sigh, some compilers do not like #ifdef in the middle
			   of macro arguments */
			 * of macro arguments
			 */
			const char *older = "older. Consider upgrading server or downgrading client"
				;
			const char *newer = "newer than client version. Consider upgrading client"
@@ -1060,7 +1079,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		 * fixup is version-limited, because we don't want to carry the
		 * OBD_CONNECT_MNE_SWAB flag around forever, just so long as we
		 * need interop with unpatched 2.2 servers.  For newer servers,
		 * the client will do MNE swabbing only as needed.  LU-1644 */
		 * the client will do MNE swabbing only as needed.  LU-1644
		 */
		if (unlikely((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
			     !(ocd->ocd_connect_flags & OBD_CONNECT_MNE_SWAB) &&
			     OBD_OCD_VERSION_MAJOR(ocd->ocd_version) == 2 &&
@@ -1078,7 +1098,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		if (ocd->ocd_connect_flags & OBD_CONNECT_CKSUM) {
			/* We sent to the server ocd_cksum_types with bits set
			 * for algorithms we understand. The server masked off
			 * the checksum types it doesn't support */
			 * the checksum types it doesn't support
			 */
			if ((ocd->ocd_cksum_types &
			     cksum_types_supported_client()) == 0) {
				LCONSOLE_WARN("The negotiation of the checksum algorithm to use with server %s failed (%x/%x), disabling checksums\n",
@@ -1092,7 +1113,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
			}
		} else {
			/* The server does not support OBD_CONNECT_CKSUM.
			 * Enforce ADLER for backward compatibility*/
			 * Enforce ADLER for backward compatibility
			 */
			cli->cl_supp_cksum_types = OBD_CKSUM_ADLER;
		}
		cli->cl_cksum_type = cksum_type_select(cli->cl_supp_cksum_types);
@@ -1108,7 +1130,8 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		/* Reset ns_connect_flags only for initial connect. It might be
		 * changed in while using FS and if we reset it in reconnect
		 * this leads to losing user settings done before such as
		 * disable lru_resize, etc. */
		 * disable lru_resize, etc.
		 */
		if (old_connect_flags != exp_connect_flags(exp) ||
		    aa->pcaa_initial_connect) {
			CDEBUG(D_HA, "%s: Resetting ns_connect_flags to server flags: %#llx\n",
@@ -1122,13 +1145,14 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
		if ((ocd->ocd_connect_flags & OBD_CONNECT_AT) &&
		    (imp->imp_msg_magic == LUSTRE_MSG_MAGIC_V2))
			/* We need a per-message support flag, because
			   a. we don't know if the incoming connect reply
			      supports AT or not (in reply_in_callback)
			      until we unpack it.
			   b. failovered server means export and flags are gone
			      (in ptlrpc_send_reply).
			   Can only be set when we know AT is supported at
			   both ends */
			 * a. we don't know if the incoming connect reply
			 *    supports AT or not (in reply_in_callback)
			 *    until we unpack it.
			 * b. failovered server means export and flags are gone
			 *    (in ptlrpc_send_reply).
			 * Can only be set when we know AT is supported at
			 * both ends
			 */
			imp->imp_msghdr_flags |= MSGHDR_AT_SUPPORT;
		else
			imp->imp_msghdr_flags &= ~MSGHDR_AT_SUPPORT;
@@ -1338,7 +1362,8 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
		/* bug 17802:  XXX client_disconnect_export vs connect request
		 * race. if client will evicted at this time, we start
		 * invalidate thread without reference to import and import can
		 * be freed at same time. */
		 * be freed at same time.
		 */
		class_import_get(imp);
		task = kthread_run(ptlrpc_invalidate_import_thread, imp,
				     "ll_imp_inval");
@@ -1470,11 +1495,13 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
	if (req) {
		/* We are disconnecting, do not retry a failed DISCONNECT rpc if
		 * it fails.  We can get through the above with a down server
		 * if the client doesn't know the server is gone yet. */
		 * if the client doesn't know the server is gone yet.
		 */
		req->rq_no_resend = 1;

		/* We want client umounts to happen quickly, no matter the
		   server state... */
		 * server state...
		 */
		req->rq_timeout = min_t(int, req->rq_timeout,
					INITIAL_CONNECT_TIMEOUT);

@@ -1506,9 +1533,10 @@ EXPORT_SYMBOL(ptlrpc_disconnect_import);
extern unsigned int at_min, at_max, at_history;

/* Bin into timeslices using AT_BINS bins.
   This gives us a max of the last binlimit*AT_BINS secs without the storage,
   but still smoothing out a return to normalcy from a slow response.
   (E.g. remember the maximum latency in each minute of the last 4 minutes.) */
 * This gives us a max of the last binlimit*AT_BINS secs without the storage,
 * but still smoothing out a return to normalcy from a slow response.
 * (E.g. remember the maximum latency in each minute of the last 4 minutes.)
 */
int at_measured(struct adaptive_timeout *at, unsigned int val)
{
	unsigned int old = at->at_current;
@@ -1522,7 +1550,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)

	if (val == 0)
		/* 0's don't count, because we never want our timeout to
		   drop to 0, and because 0 could mean an error */
		 * drop to 0, and because 0 could mean an error
		 */
		return 0;

	spin_lock(&at->at_lock);
@@ -1564,7 +1593,8 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)

	if (at->at_flags & AT_FLG_NOHIST)
		/* Only keep last reported val; keeping the rest of the history
		   for proc only */
		 * for debugfs only
		 */
		at->at_current = val;

	if (at_max > 0)
+5 −3
Original line number Diff line number Diff line
@@ -75,7 +75,8 @@
} while (0)

/* This is a callback from the llog_* functions.
 * Assumes caller has already pushed us into the kernel context. */
 * Assumes caller has already pushed us into the kernel context.
 */
static int llog_client_open(const struct lu_env *env,
			    struct llog_handle *lgh, struct llog_logid *logid,
			    char *name, enum llog_open_param open_param)
@@ -316,8 +317,9 @@ static int llog_client_close(const struct lu_env *env,
			     struct llog_handle *handle)
{
	/* this doesn't call LLOG_ORIGIN_HANDLE_CLOSE because
	   the servers all close the file at the end of every
	   other LLOG_ RPC. */
	 *  the servers all close the file at the end of every
	 * other LLOG_ RPC.
	 */
	return 0;
}

+6 −3
Original line number Diff line number Diff line
@@ -306,7 +306,8 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,

	/* This sanity check is more of an insanity check; we can still
	 * hose a kernel by allowing the request history to grow too
	 * far. */
	 * far.
	 */
	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (val > totalram_pages / (2 * bufpages))
		return -ERANGE;
@@ -737,7 +738,8 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt,
		 * recent), search from it onwards.
		 * Since the service history is LRU (i.e. culled reqs will
		 * be near the head), we shouldn't have to do long
		 * re-scans */
		 * re-scans
		 */
		LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq,
			 "%s:%d: seek seq %llu, request seq %llu\n",
			 svcpt->scp_service->srv_name, svcpt->scp_cpt,
@@ -915,7 +917,8 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
		 * here.  The request could contain any old crap, so you
		 * must be just as careful as the service's request
		 * parser. Currently I only print stuff here I know is OK
		 * to look at coz it was set up in request_in_callback()!!! */
		 * to look at coz it was set up in request_in_callback()!!!
		 */
		seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
			   req->rq_history_seq, nidstr,
			   libcfs_id2str(req->rq_peer), req->rq_xid,
Loading