Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 6ba59179 authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/obdclass: Adjust comments to better conform to coding style



This patch fixes "Block comments use a trailing */ on a separate line"
warnings reported by checkpatch.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2dfd89c4
Loading
Loading
Loading
Loading
+7 −6
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include <linux/sched.h>
#include "../include/cl_object.h"
#include "cl_internal.h"

@@ -308,7 +309,8 @@ static void cl_io_locks_sort(struct cl_io *io)
							   &prev->cill_linkage);
					done = 0;
					continue; /* don't change prev: it's
						   * still "previous" */
						   * still "previous"
						   */
				case -1: /* already in order */
					break;
				}
@@ -419,7 +421,8 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		if (!cl_lockset_match(set, &link->cill_descr)) {
			/* XXX some locking to guarantee that locks aren't
			 * expanded in between. */
			 * expanded in between.
			 */
			result = cl_lockset_lock_one(env, io, set, link);
			if (result != 0)
				break;
@@ -1053,7 +1056,8 @@ EXPORT_SYMBOL(cl_page_list_init);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here. */
	 * it is not passed here.
	 */
	LASSERT(page->cp_owner);
	LINVRNT(plist->pl_owner == current);

@@ -1510,9 +1514,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
}
EXPORT_SYMBOL(cl_req_attr_set);

/* XXX complete(), init_completion(), and wait_for_completion(), until they are
 * implemented in libcfs. */
# include <linux/sched.h>

/**
 * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+16 −8
Original line number Diff line number Diff line
@@ -935,7 +935,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
	if (result == 0) {
		/* To avoid being interrupted by the 'non-fatal' signals
		 * (SIGCHLD, for instance), we'd block them temporarily.
		 * LU-305 */
		 * LU-305
		 */
		blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);

		init_waitqueue_entry(&waiter, current);
@@ -946,7 +947,8 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
		LASSERT(cl_lock_nr_mutexed(env) == 0);

		/* Returning ERESTARTSYS instead of EINTR so syscalls
		 * can be restarted if signals are pending here */
		 * can be restarted if signals are pending here
		 */
		result = -ERESTARTSYS;
		if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
			schedule();
@@ -1170,7 +1172,8 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
			/* kick layers. */
			result = cl_enqueue_kick(env, lock, io, flags);
			/* For AGL case, the cl_lock::cll_state may
			 * become CLS_HELD already. */
			 * become CLS_HELD already.
			 */
			if (result == 0 && lock->cll_state == CLS_QUEUING)
				cl_lock_state_set(env, lock, CLS_ENQUEUED);
			break;
@@ -1300,7 +1303,8 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
	}

	/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
	 * underlying resources. */
	 * underlying resources.
	 */
	if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
		cl_lock_user_del(env, lock);
		return 0;
@@ -1777,13 +1781,15 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
	lock = NULL;

	need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
				    * not PHANTOM */
				    * not PHANTOM
				    */
	need->cld_start = need->cld_end = index;
	need->cld_enq_flags = 0;

	spin_lock(&head->coh_lock_guard);
	/* It is fine to match any group lock since there could be only one
	 * with a uniq gid and it conflicts with all other lock modes too */
	 * with a uniq gid and it conflicts with all other lock modes too
	 */
	list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
		if (scan != except &&
		    (scan->cll_descr.cld_mode == CLM_GROUP ||
@@ -1798,7 +1804,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
		    (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
		    (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
			/* Don't increase cs_hit here since this
			 * is just a helper function. */
			 * is just a helper function.
			 */
			cl_lock_get_trust(scan);
			lock = scan;
			break;
@@ -1843,7 +1850,8 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
			/* Cache the first-non-overlapped index so as to skip
			 * all pages within [index, clt_fn_index). This
			 * is safe because if tmp lock is canceled, it will
			 * discard these pages. */
			 * discard these pages.
			 */
			info->clt_fn_index = tmp->cll_descr.cld_end + 1;
			if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
				info->clt_fn_index = CL_PAGE_EOF;
+2 −3
Original line number Diff line number Diff line
@@ -508,7 +508,8 @@ static int __init init_obdclass(void)

	/* Default the dirty page cache cap to 1/2 of system memory.
	 * For clients with less memory, a larger fraction is needed
	 * for other purposes (mostly for BGL). */
	 * for other purposes (mostly for BGL).
	 */
	if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
		obd_max_dirty_pages = totalram_pages / 4;
	else
@@ -543,8 +544,6 @@ static int __init init_obdclass(void)
	return err;
}

/* liblustre doesn't call cleanup_obdclass, apparently.  we carry on in this
 * ifdef to the end of the file to cover module and versioning goo.*/
static void cleanup_obdclass(void)
{
	int i;
+26 −16
Original line number Diff line number Diff line
@@ -381,7 +381,8 @@ int class_name2dev(const char *name)

		if (obd && strcmp(name, obd->obd_name) == 0) {
			/* Make sure we finished attaching before we give
			   out any references */
			 * out any references
			 */
			LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
			if (obd->obd_attached) {
				read_unlock(&obd_dev_lock);
@@ -456,8 +457,9 @@ struct obd_device *class_num2obd(int num)
EXPORT_SYMBOL(class_num2obd);

/* Search for a client OBD connected to tgt_uuid.  If grp_uuid is
   specified, then only the client with that uuid is returned,
   otherwise any client connected to the tgt is returned. */
 * specified, then only the client with that uuid is returned,
 * otherwise any client connected to the tgt is returned.
 */
struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
					  const char *typ_name,
					  struct obd_uuid *grp_uuid)
@@ -488,9 +490,10 @@ struct obd_device *class_find_client_obd(struct obd_uuid *tgt_uuid,
EXPORT_SYMBOL(class_find_client_obd);

/* Iterate the obd_device list looking devices have grp_uuid. Start
   searching at *next, and if a device is found, the next index to look
   at is saved in *next. If next is NULL, then the first matching device
   will always be returned. */
 * searching at *next, and if a device is found, the next index to look
 * at is saved in *next. If next is NULL, then the first matching device
 * will always be returned.
 */
struct obd_device *class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
{
	int i;
@@ -708,7 +711,8 @@ EXPORT_SYMBOL(class_export_put);

/* Creates a new export, adds it to the hash table, and returns a
 * pointer to it. The refcount is 2: one for the hash reference, and
 * one for the pointer returned by this function. */
 * one for the pointer returned by this function.
 */
struct obd_export *class_new_export(struct obd_device *obd,
				    struct obd_uuid *cluuid)
{
@@ -891,8 +895,9 @@ static void init_imp_at(struct imp_at *at)
	at_init(&at->iat_net_latency, 0, 0);
	for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
		/* max service estimates are tracked on the server side, so
		   don't use the AT history here, just use the last reported
		   val. (But keep hist for proc histogram, worst_ever) */
		 * don't use the AT history here, just use the last reported
		 * val. (But keep hist for proc histogram, worst_ever)
		 */
		at_init(&at->iat_service_estimate[i], INITIAL_CONNECT_TIMEOUT,
			AT_FLG_NOHIST);
	}
@@ -931,7 +936,8 @@ struct obd_import *class_new_import(struct obd_device *obd)
	init_imp_at(&imp->imp_at);

	/* the default magic is V2, will be used in connect RPC, and
	 * then adjusted according to the flags in request/reply. */
	 * then adjusted according to the flags in request/reply.
	 */
	imp->imp_msg_magic = LUSTRE_MSG_MAGIC_V2;

	return imp;
@@ -994,9 +1000,10 @@ EXPORT_SYMBOL(__class_export_del_lock_ref);
#endif

/* A connection defines an export context in which preallocation can
   be managed. This releases the export pointer reference, and returns
   the export handle, so the export refcount is 1 when this function
   returns. */
 * be managed. This releases the export pointer reference, and returns
 * the export handle, so the export refcount is 1 when this function
 * returns.
 */
int class_connect(struct lustre_handle *conn, struct obd_device *obd,
		  struct obd_uuid *cluuid)
{
@@ -1024,7 +1031,8 @@ EXPORT_SYMBOL(class_connect);
 * and if disconnect really need
 * 2 - removing from hash
 * 3 - in client_unlink_export
 * The export pointer passed to this function can destroyed */
 * The export pointer passed to this function can destroyed
 */
int class_disconnect(struct obd_export *export)
{
	int already_disconnected;
@@ -1041,7 +1049,8 @@ int class_disconnect(struct obd_export *export)

	/* class_cleanup(), abort_recovery(), and class_fail_export()
	 * all end up in here, and if any of them race we shouldn't
	 * call extra class_export_puts(). */
	 * call extra class_export_puts().
	 */
	if (already_disconnected)
		goto no_disconn;

@@ -1081,7 +1090,8 @@ void class_fail_export(struct obd_export *exp)

	/* Most callers into obd_disconnect are removing their own reference
	 * (request, for example) in addition to the one from the hash table.
	 * We don't have such a reference here, so make one. */
	 * We don't have such a reference here, so make one.
	 */
	class_export_get(exp);
	rc = obd_disconnect(exp);
	if (rc)
+2 −1
Original line number Diff line number Diff line
@@ -102,7 +102,8 @@ int obd_ioctl_getdata(char **buf, int *len, void __user *arg)
	/* When there are lots of processes calling vmalloc on multi-core
	 * system, the high lock contention will hurt performance badly,
	 * obdfilter-survey is an example, which relies on ioctl. So we'd
	 * better avoid vmalloc on ioctl path. LU-66 */
	 * better avoid vmalloc on ioctl path. LU-66
	 */
	*buf = libcfs_kvzalloc(hdr.ioc_len, GFP_NOFS);
	if (!*buf) {
		CERROR("Cannot allocate control buffer of len %d\n",
Loading