Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6f789a6a authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/ldlm: Adjust comments to better conform to coding style



This patch fixes "Block comments use a trailing */ on a separate line"
warnings from checkpatch.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ec9a1ac2
Loading
Loading
Loading
Loading
+4 −2
Original line number Diff line number Diff line
@@ -133,7 +133,8 @@ static void __rotate_change_maxhigh(struct interval_node *node,

/* The left rotation "pivots" around the link from node to node->right, and
 * - node will be linked to node->right's left child, and
 * - node->right's left child will be linked to node's right child.  */
 * - node->right's left child will be linked to node's right child.
 */
static void __rotate_left(struct interval_node *node,
			  struct interval_node **root)
{
@@ -162,7 +163,8 @@ static void __rotate_left(struct interval_node *node,

/* The right rotation "pivots" around the link from node to node->left, and
 * - node will be linked to node->left's right child, and
 * - node->left's right child will be linked to node's left child.  */
 * - node->left's right child will be linked to node's left child.
 */
static void __rotate_right(struct interval_node *node,
			   struct interval_node **root)
{
+8 −4
Original line number Diff line number Diff line
@@ -62,7 +62,8 @@
 * is the "highest lock".  This function returns the new KMS value.
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */
 * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes!
 */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
@@ -72,7 +73,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)

	/* don't let another thread in ldlm_extent_shift_kms race in
	 * just after we finish and take our lock into account in its
	 * calculation of the kms */
	 * calculation of the kms
	 */
	lock->l_flags |= LDLM_FL_KMS_IGNORE;

	list_for_each(tmp, &res->lr_granted) {
@@ -85,7 +87,8 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
			return old_kms;

		/* This extent _has_ to be smaller than old_kms (checked above)
		 * so kms can only ever be smaller or the same as old_kms. */
		 * so kms can only ever be smaller or the same as old_kms.
		 */
		if (lck->l_policy_data.l_extent.end + 1 > kms)
			kms = lck->l_policy_data.l_extent.end + 1;
	}
@@ -191,7 +194,8 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
	res->lr_itree[idx].lit_size++;

	/* even though we use interval tree to manage the extent lock, we also
	 * add the locks into grant list, for debug purpose, .. */
	 * add the locks into grant list, for debug purpose, ..
	 */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);
}

+28 −15
Original line number Diff line number Diff line
@@ -107,7 +107,8 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
		lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;

		/* when reaching here, it is under lock_res_and_lock(). Thus,
		   need call the nolock version of ldlm_lock_decref_internal*/
		 * need call the nolock version of ldlm_lock_decref_internal
		 */
		ldlm_lock_decref_internal_nolock(lock, mode);
	}

@@ -159,13 +160,15 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
	*err = ELDLM_OK;

	/* No blocking ASTs are sent to the clients for
	 * Posix file & record locks */
	 * Posix file & record locks
	 */
	req->l_blocking_ast = NULL;

reprocess:
	if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
		/* This loop determines where this processes locks start
		 * in the resource lr_granted list. */
		 * in the resource lr_granted list.
		 */
		list_for_each(tmp, &res->lr_granted) {
			lock = list_entry(tmp, struct ldlm_lock,
					      l_res_link);
@@ -180,7 +183,8 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
		lockmode_verify(mode);

		/* This loop determines if there are existing locks
		 * that conflict with the new lock request. */
		 * that conflict with the new lock request.
		 */
		list_for_each(tmp, &res->lr_granted) {
			lock = list_entry(tmp, struct ldlm_lock,
					      l_res_link);
@@ -238,8 +242,8 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
	}

	/* Scan the locks owned by this process that overlap this request.
	 * We may have to merge or split existing locks. */

	 * We may have to merge or split existing locks.
	 */
	if (!ownlocks)
		ownlocks = &res->lr_granted;

@@ -253,7 +257,8 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
			/* If the modes are the same then we need to process
			 * locks that overlap OR adjoin the new lock. The extra
			 * logic condition is necessary to deal with arithmetic
			 * overflow and underflow. */
			 * overflow and underflow.
			 */
			if ((new->l_policy_data.l_flock.start >
			     (lock->l_policy_data.l_flock.end + 1))
			    && (lock->l_policy_data.l_flock.end !=
@@ -327,11 +332,13 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
		 * with the request but this would complicate the reply
		 * processing since updates to req get reflected in the
		 * reply. The client side replays the lock request so
		 * it must see the original lock data in the reply. */
		 * it must see the original lock data in the reply.
		 */

		/* XXX - if ldlm_lock_new() can sleep we should
		 * release the lr_lock, allocate the new lock,
		 * and restart processing this lock. */
		 * and restart processing this lock.
		 */
		if (!new2) {
			unlock_res_and_lock(req);
			new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
@@ -396,7 +403,8 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
	if (*flags != LDLM_FL_WAIT_NOREPROC) {
		/* The only one possible case for client-side calls flock
		 * policy function is ldlm_flock_completion_ast inside which
		 * carries LDLM_FL_WAIT_NOREPROC flag. */
		 * carries LDLM_FL_WAIT_NOREPROC flag.
		 */
		CERROR("Illegal parameter for client-side-only module.\n");
		LBUG();
	}
@@ -404,7 +412,8 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
	/* In case we're reprocessing the requested lock we can't destroy
	 * it until after calling ldlm_add_ast_work_item() above so that laawi()
	 * can bump the reference count on \a req. Otherwise \a req
	 * could be freed before the completion AST can be sent.  */
	 * could be freed before the completion AST can be sent.
	 */
	if (added)
		ldlm_flock_destroy(req, mode, *flags);

@@ -458,7 +467,8 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
	/* Import invalidation. We need to actually release the lock
	 * references being held, so that it can go away. No point in
	 * holding the lock even if app still believes it has it, since
	 * server already dropped it anyway. Only for granted locks too. */
	 * server already dropped it anyway. Only for granted locks too.
	 */
	if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
	    (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
		if (lock->l_req_mode == lock->l_granted_mode &&
@@ -539,7 +549,8 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
	} else if (flags & LDLM_FL_TEST_LOCK) {
		/* fcntl(F_GETLK) request */
		/* The old mode was saved in getlk->fl_type so that if the mode
		 * in the lock changes we can decref the appropriate refcount.*/
		 * in the lock changes we can decref the appropriate refcount.
		 */
		ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
		switch (lock->l_granted_mode) {
		case LCK_PR:
@@ -558,7 +569,8 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
		__u64 noreproc = LDLM_FL_WAIT_NOREPROC;

		/* We need to reprocess the lock to do merges or splits
		 * with existing locks owned by this process. */
		 * with existing locks owned by this process.
		 */
		ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
	}
	unlock_res_and_lock(lock);
@@ -575,7 +587,8 @@ void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
	lpolicy->l_flock.pid = wpolicy->l_flock.lfw_pid;
	/* Compat code, old clients had no idea about owner field and
	 * relied solely on pid for ownership. Introduced in LU-104, 2.1,
	 * April 2011 */
	 * April 2011
	 */
	lpolicy->l_flock.owner = wpolicy->l_flock.lfw_pid;
}

+2 −1
Original line number Diff line number Diff line
@@ -96,7 +96,8 @@ enum {
	LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
	LDLM_CANCEL_LRUR   = 1 << 3, /* Cancel locks from lru resize. */
	LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
				      * sending nor waiting for any rpcs) */
				      * sending nor waiting for any rpcs)
				      */
};

int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
+18 −9
Original line number Diff line number Diff line
@@ -219,7 +219,8 @@ EXPORT_SYMBOL(client_import_find_conn);
void client_destroy_import(struct obd_import *imp)
{
	/* Drop security policy instance after all RPCs have finished/aborted
	 * to let all busy contexts be released. */
	 * to let all busy contexts be released.
	 */
	class_import_get(imp);
	class_destroy_import(imp);
	sptlrpc_import_sec_put(imp);
@@ -245,7 +246,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
	int rc;

	/* In a more perfect world, we would hang a ptlrpc_client off of
	 * obd_type and just use the values from there. */
	 * obd_type and just use the values from there.
	 */
	if (!strcmp(name, LUSTRE_OSC_NAME)) {
		rq_portal = OST_REQUEST_PORTAL;
		rp_portal = OSC_REPLY_PORTAL;
@@ -348,7 +350,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
	/* This value may be reduced at connect time in
	 * ptlrpc_connect_interpret() . We initialize it to only
	 * 1MB until we know what the performance looks like.
	 * In the future this should likely be increased. LU-1431 */
	 * In the future this should likely be increased. LU-1431
	 */
	cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
					  LNET_MTU >> PAGE_CACHE_SHIFT);

@@ -545,14 +548,16 @@ int client_disconnect_export(struct obd_export *exp)

	/* Mark import deactivated now, so we don't try to reconnect if any
	 * of the cleanup RPCs fails (e.g. LDLM cancel, etc).  We don't
	 * fully deactivate the import, or that would drop all requests. */
	 * fully deactivate the import, or that would drop all requests.
	 */
	spin_lock(&imp->imp_lock);
	imp->imp_deactive = 1;
	spin_unlock(&imp->imp_lock);

	/* Some non-replayable imports (MDS's OSCs) are pinged, so just
	 * delete it regardless.  (It's safe to delete an import that was
	 * never added.) */
	 * never added.)
	 */
	(void)ptlrpc_pinger_del_import(imp);

	if (obd->obd_namespace) {
@@ -564,7 +569,8 @@ int client_disconnect_export(struct obd_export *exp)
	}

	/* There's no need to hold sem while disconnecting an import,
	 * and it may actually cause deadlock in GSS. */
	 * and it may actually cause deadlock in GSS.
	 */
	up_write(&cli->cl_sem);
	rc = ptlrpc_disconnect_import(imp, 0);
	down_write(&cli->cl_sem);
@@ -573,7 +579,8 @@ int client_disconnect_export(struct obd_export *exp)

out_disconnect:
	/* Use server style - class_disconnect should be always called for
	 * o_disconnect. */
	 * o_disconnect.
	 */
	err = class_disconnect(exp);
	if (!rc && err)
		rc = err;
@@ -592,7 +599,8 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
	struct obd_device *obd;

	/* Check that we still have all structures alive as this may
	 * be some late RPC at shutdown time. */
	 * be some late RPC at shutdown time.
	 */
	if (unlikely(!req->rq_export || !req->rq_export->exp_obd ||
		     !exp_connect_lru_resize(req->rq_export))) {
		lustre_msg_set_slv(req->rq_repmsg, 0);
@@ -697,7 +705,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
		 * reply ref until ptlrpc_handle_rs() is done
		 * with the reply state (if the send was successful, there
		 * would have been +1 ref for the net, which
		 * reply_out_callback leaves alone) */
		 * reply_out_callback leaves alone)
		 */
		rs->rs_on_net = 0;
		ptlrpc_rs_addref(rs);
	}
Loading