Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit acb9abc1 authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/lov: Adjust comments to better conform to coding style



This patch fixes "Block comments use a trailing */ on a separate line"
warnings from checkpatch.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7f05d5bb
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -162,7 +162,8 @@ static int lsm_destroy_plain(struct lov_stripe_md *lsm, struct obdo *oa,
}

/* Find minimum stripe maxbytes value.  For inactive or
 * reconnecting targets use LUSTRE_STRIPE_MAXBYTES. */
 * reconnecting targets use LUSTRE_STRIPE_MAXBYTES.
 */
static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
{
	struct obd_import *imp = tgt->ltd_obd->u.cli.cl_import;
+4 −2
Original line number Diff line number Diff line
@@ -43,7 +43,8 @@
/* lov_do_div64(a, b) returns a % b, and a = a / b.
 * The 32-bit code is LOV-specific due to knowing about stripe limits in
 * order to reduce the divisor to a 32-bit number.  If the divisor is
 * already a 32-bit value the compiler handles this directly. */
 * already a 32-bit value the compiler handles this directly.
 */
#if BITS_PER_LONG == 64
# define lov_do_div64(n, base) ({					\
	uint64_t __base = (base);					\
@@ -92,7 +93,8 @@ struct lov_request_set {
	atomic_t			set_refcount;
	struct obd_export		*set_exp;
	/* XXX: There is @set_exp already, however obd_statfs gets obd_device
	   only. */
	 * only.
	 */
	struct obd_device		*set_obd;
	int				set_count;
	atomic_t			set_completes;
+21 −11
Original line number Diff line number Diff line
@@ -160,7 +160,8 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
			 * to remember the subio. This is because lock is able
			 * to be cached, but this is not true for IO. This
			 * further means a sublock might be referenced in
			 * different io context. -jay */
			 * different io context. -jay
			 */

			sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
					       descr, "lov-parent", parent);
@@ -477,7 +478,8 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
	result = cl_enqueue_try(env, sublock, io, enqflags);
	if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
		/* if it is enqueued, try to `wait' on it---maybe it's already
		 * granted */
		 * granted
		 */
		result = cl_wait_try(env, sublock);
		if (result == CLO_REENQUEUED)
			result = CLO_WAIT;
@@ -518,7 +520,8 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
		} else {
			kmem_cache_free(lov_lock_link_kmem, link);
			/* other thread allocated sub-lock, or enqueue is no
			 * longer going on */
			 * longer going on
			 */
			cl_lock_mutex_put(env, parent);
			cl_lock_unhold(env, sublock, "lov-parent", parent);
			cl_lock_mutex_get(env, parent);
@@ -575,7 +578,8 @@ static int lov_lock_enqueue(const struct lu_env *env,
		if (!sub) {
			result = lov_sublock_fill(env, lock, io, lck, i);
			/* lov_sublock_fill() released @lock mutex,
			 * restart. */
			 * restart.
			 */
			break;
		}
		sublock = sub->lss_cl.cls_lock;
@@ -603,7 +607,8 @@ static int lov_lock_enqueue(const struct lu_env *env,
					/* take recursive mutex of sublock */
					cl_lock_mutex_get(env, sublock);
					/* need to release all locks in closure
					 * otherwise it may deadlock. LU-2683.*/
					 * otherwise it may deadlock. LU-2683.
					 */
					lov_sublock_unlock(env, sub, closure,
							   subenv);
					/* sublock and parent are held. */
@@ -647,7 +652,8 @@ static int lov_lock_unuse(const struct lu_env *env,

		/* top-lock state cannot change concurrently, because single
		 * thread (one that released the last hold) carries unlocking
		 * to the completion. */
		 * to the completion.
		 */
		LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
@@ -693,7 +699,8 @@ static void lov_lock_cancel(const struct lu_env *env,

		/* top-lock state cannot change concurrently, because single
		 * thread (one that released the last hold) carries unlocking
		 * to the completion. */
		 * to the completion.
		 */
		lls = &lck->lls_sub[i];
		sub = lls->sub_lock;
		if (!sub)
@@ -773,8 +780,9 @@ static int lov_lock_wait(const struct lu_env *env,
		if (result != 0)
			break;
	}
	/* Each sublock only can be reenqueued once, so will not loop for
	 * ever. */
	/* Each sublock only can be reenqueued once, so will not loop
	 * forever.
	 */
	if (result == 0 && reenqueued != 0)
		goto again;
	cl_lock_closure_fini(closure);
@@ -823,7 +831,8 @@ static int lov_lock_use(const struct lu_env *env,
								 i, 1, rc);
			} else if (sublock->cll_state == CLS_NEW) {
				/* Sub-lock might have been canceled, while
				 * top-lock was cached. */
				 * top-lock was cached.
				 */
				result = -ESTALE;
				lov_sublock_release(env, lck, i, 1, result);
			}
@@ -928,7 +937,8 @@ static int lov_lock_fits_into(const struct lu_env *env,
	LASSERT(lov->lls_nr > 0);

	/* for top lock, it's necessary to match enq flags otherwise it will
	 * run into problem if a sublock is missing and reenqueue. */
	 * run into problem if a sublock is missing and reenqueue.
	 */
	if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
		return 0;

+51 −27
Original line number Diff line number Diff line
@@ -61,7 +61,8 @@
#include "lov_internal.h"

/* Keep a refcount of lov->tgt usage to prevent racing with addition/deletion.
   Any function that expects lov_tgts to remain stationary must take a ref. */
 * Any function that expects lov_tgts to remain stationary must take a ref.
 */
static void lov_getref(struct obd_device *obd)
{
	struct lov_obd *lov = &obd->u.lov;
@@ -96,7 +97,8 @@ static void lov_putref(struct obd_device *obd)
			list_add(&tgt->ltd_kill, &kill);
			/* XXX - right now there is a dependency on ld_tgt_count
			 * being the maximum tgt index for computing the
			 * mds_max_easize. So we can't shrink it. */
			 * mds_max_easize. So we can't shrink it.
			 */
			lov_ost_pool_remove(&lov->lov_packed, i);
			lov->lov_tgts[i] = NULL;
			lov->lov_death_row--;
@@ -158,7 +160,8 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
	if (activate) {
		tgt_obd->obd_no_recov = 0;
		/* FIXME this is probably supposed to be
		   ptlrpc_set_import_active.  Horrible naming. */
		 * ptlrpc_set_import_active.  Horrible naming.
		 */
		ptlrpc_activate_import(imp);
	}

@@ -315,7 +318,8 @@ static int lov_disconnect(struct obd_export *exp)
	}

	/* Let's hold another reference so lov_del_obd doesn't spin through
	   putref every time */
	 * putref every time
	 */
	obd_getref(obd);

	for (i = 0; i < lov->desc.ld_tgt_count; i++) {
@@ -480,7 +484,8 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
				continue;

			/* don't send sync event if target not
			 * connected/activated */
			 * connected/activated
			 */
			if (is_sync &&  !lov->lov_tgts[i]->ltd_active)
				continue;

@@ -595,8 +600,9 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,

	if (lov->lov_connects == 0) {
		/* lov_connect hasn't been called yet. We'll do the
		   lov_connect_obd on this target when that fn first runs,
		   because we don't know the connect flags yet. */
		 * lov_connect_obd on this target when that fn first runs,
		 * because we don't know the connect flags yet.
		 */
		return 0;
	}

@@ -701,8 +707,9 @@ static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
	kfree(tgt);

	/* Manual cleanup - no cleanup logs to clean up the osc's.  We must
	   do it ourselves. And we can't do it from lov_cleanup,
	   because we just lost our only reference to it. */
	 * do it ourselves. And we can't do it from lov_cleanup,
	 * because we just lost our only reference to it.
	 */
	if (osc_obd)
		class_manual_cleanup(osc_obd);
}
@@ -858,7 +865,8 @@ static int lov_cleanup(struct obd_device *obd)
		/* free pool structs */
		CDEBUG(D_INFO, "delete pool %p\n", pool);
		/* In the function below, .hs_keycmp resolves to
		 * pool_hashkey_keycmp() */
		 * pool_hashkey_keycmp()
		 */
		/* coverity[overrun-buffer-val] */
		lov_pool_del(obd, pool->pool_name);
	}
@@ -878,8 +886,9 @@ static int lov_cleanup(struct obd_device *obd)
			if (lov->lov_tgts[i]->ltd_active ||
			    atomic_read(&lov->lov_refcount))
			    /* We should never get here - these
			       should have been removed in the
			     disconnect. */
			     * should have been removed in the
			     * disconnect.
			     */
				CERROR("lov tgt %d not cleaned! deathrow=%d, lovrc=%d\n",
				       i, lov->lov_death_row,
				       atomic_read(&lov->lov_refcount));
@@ -1197,7 +1206,8 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
}

/* If @oti is given, the request goes from MDS and responses from OSTs are not
   needed. Otherwise, a client is waiting for responses. */
 * needed. Otherwise, a client is waiting for responses.
 */
static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
			     struct obd_trans_info *oti,
			     struct ptlrpc_request_set *rqset)
@@ -1270,7 +1280,8 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
/* find any ldlm lock of the inode in lov
 * return 0    not find
 *	1    find one
 *      < 0    error */
 *      < 0    error
 */
static int lov_find_cbdata(struct obd_export *exp,
			   struct lov_stripe_md *lsm, ldlm_iterator_t it,
			   void *data)
@@ -1366,7 +1377,8 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
	int rc = 0;

	/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
	 * statfs requests */
	 * statfs requests
	 */
	set = ptlrpc_prep_set();
	if (!set)
		return -ENOMEM;
@@ -1542,7 +1554,8 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
				continue;

			/* ll_umount_begin() sets force flag but for lov, not
			 * osc. Let's pass it through */
			 * osc. Let's pass it through
			 */
			osc_obd = class_exp2obd(lov->lov_tgts[i]->ltd_exp);
			osc_obd->obd_force = obddev->obd_force;
			err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
@@ -1620,7 +1633,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
		return -EINVAL;

	/* If we have finished mapping on previous device, shift logical
	 * offset to start of next device */
	 * offset to start of next device
	 */
	if ((lov_stripe_intersects(lsm, stripe_no, fm_start, fm_end,
				   &lun_start, &lun_end)) != 0 &&
				   local_end < lun_end) {
@@ -1628,7 +1642,8 @@ static u64 fiemap_calc_fm_end_offset(struct ll_user_fiemap *fiemap,
		*start_stripe = stripe_no;
	} else {
		/* This is a special value to indicate that caller should
		 * calculate offset in next stripe. */
		 * calculate offset in next stripe.
		 */
		fm_end_offset = 0;
		*start_stripe = (stripe_no + 1) % lsm->lsm_stripe_count;
	}
@@ -1796,7 +1811,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,

		/* If this is a continuation FIEMAP call and we are on
		 * starting stripe then lun_start needs to be set to
		 * fm_end_offset */
		 * fm_end_offset
		 */
		if (fm_end_offset != 0 && cur_stripe == start_stripe)
			lun_start = fm_end_offset;

@@ -1818,7 +1834,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
		len_mapped_single_call = 0;

		/* If the output buffer is very large and the objects have many
		 * extents we may need to loop on a single OST repeatedly */
		 * extents we may need to loop on a single OST repeatedly
		 */
		ost_eof = 0;
		ost_done = 0;
		do {
@@ -1874,7 +1891,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
			if (ext_count == 0) {
				ost_done = 1;
				/* If last stripe has hole at the end,
				 * then we need to return */
				 * then we need to return
				 */
				if (cur_stripe_wrap == last_stripe) {
					fiemap->fm_mapped_extents = 0;
					goto finish;
@@ -1896,7 +1914,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
				ost_done = 1;

			/* Clear the EXTENT_LAST flag which can be present on
			 * last extent */
			 * last extent
			 */
			if (lcl_fm_ext[ext_count-1].fe_flags & FIEMAP_EXTENT_LAST)
				lcl_fm_ext[ext_count - 1].fe_flags &=
							    ~FIEMAP_EXTENT_LAST;
@@ -1925,7 +1944,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,

finish:
	/* Indicate that we are returning device offsets unless file just has
	 * single stripe */
	 * single stripe
	 */
	if (lsm->lsm_stripe_count > 1)
		fiemap->fm_flags |= FIEMAP_FLAG_DEVICE_ORDER;

@@ -1933,7 +1953,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
		goto skip_last_device_calc;

	/* Check if we have reached the last stripe and whether mapping for that
	 * stripe is done. */
	 * stripe is done.
	 */
	if (cur_stripe_wrap == last_stripe) {
		if (ost_done || ost_eof)
			fiemap->fm_extents[current_extent - 1].fe_flags |=
@@ -1978,10 +1999,12 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,

		/* XXX This is another one of those bits that will need to
		 * change if we ever actually support nested LOVs.  It uses
		 * the lock's export to find out which stripe it is. */
		 * the lock's export to find out which stripe it is.
		 */
		/* XXX - it's assumed all the locks for deleted OSTs have
		 * been cancelled. Also, the export for deleted OSTs will
		 * be NULL and won't match the lock's export. */
		 * be NULL and won't match the lock's export.
		 */
		for (i = 0; i < lsm->lsm_stripe_count; i++) {
			loi = lsm->lsm_oinfo[i];
			if (lov_oinfo_is_dummy(loi))
@@ -2317,7 +2340,8 @@ static int __init lov_init(void)

	/* print an address of _any_ initialized kernel symbol from this
	 * module, to allow debugging with gdb that doesn't support data
	 * symbols from modules.*/
	 * symbols from modules.
	 */
	CDEBUG(D_INFO, "Lustre LOV module (%p).\n", &lov_caches);

	rc = lu_kmem_init(lov_caches);
+16 −8
Original line number Diff line number Diff line
@@ -135,7 +135,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
		 * Do not leave the object in cache to avoid accessing
		 * freed memory. This is because osc_object is referring to
		 * lov_oinfo of lsm_stripe_data which will be freed due to
		 * this failure. */
		 * this failure.
		 */
		cl_object_kill(env, stripe);
		cl_object_put(env, stripe);
		return -EIO;
@@ -174,7 +175,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
		old_lov = cl2lov(lu2cl(old_obj));
		if (old_lov->lo_layout_invalid) {
			/* the object's layout has already changed but isn't
			 * refreshed */
			 * refreshed
			 */
			lu_object_unhash(env, &stripe->co_lu);
			result = -EAGAIN;
		} else {
@@ -243,7 +245,8 @@ static int lov_init_raid0(const struct lu_env *env,
			subconf->u.coc_oinfo = oinfo;
			LASSERTF(subdev, "not init ost %d\n", ost_idx);
			/* In the function below, .hs_keycmp resolves to
			 * lu_obj_hop_keycmp() */
			 * lu_obj_hop_keycmp()
			 */
			/* coverity[overrun-buffer-val] */
			stripe = lov_sub_find(env, subdev, ofid, subconf);
			if (!IS_ERR(stripe)) {
@@ -310,7 +313,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
	cl_object_put(env, sub);

	/* ... wait until it is actually destroyed---sub-object clears its
	 * ->lo_sub[] slot in lovsub_object_fini() */
	 * ->lo_sub[] slot in lovsub_object_fini()
	 */
	if (r0->lo_sub[idx] == los) {
		waiter = &lov_env_info(env)->lti_waiter;
		init_waitqueue_entry(waiter, current);
@@ -318,7 +322,8 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
		set_current_state(TASK_UNINTERRUPTIBLE);
		while (1) {
			/* this wait-queue is signaled at the end of
			 * lu_object_free(). */
			 * lu_object_free().
			 */
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_lock(&r0->lo_sub_lock);
			if (r0->lo_sub[idx] == los) {
@@ -465,7 +470,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
	 * context, and this function is called in ccc_lock_state(), it will
	 * hit this assertion.
	 * Anyway, it's still okay to call attr_get w/o type guard as layout
	 * can't go if locks exist. */
	 * can't go if locks exist.
	 */
	/* LASSERT(atomic_read(&lsm->lsm_refc) > 1); */

	if (!r0->lo_attr_valid) {
@@ -475,7 +481,8 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,

		memset(lvb, 0, sizeof(*lvb));
		/* XXX: timestamps can be negative by sanity:test_39m,
		 * how can it be? */
		 * how can it be?
		 */
		lvb->lvb_atime = LLONG_MIN;
		lvb->lvb_ctime = LLONG_MIN;
		lvb->lvb_mtime = LLONG_MIN;
@@ -845,7 +852,8 @@ static int lov_attr_get(const struct lu_env *env, struct cl_object *obj,
			struct cl_attr *attr)
{
	/* do not take lock, as this function is called under a
	 * spin-lock. Layout is protected from changing by ongoing IO. */
	 * spin-lock. Layout is protected from changing by ongoing IO.
	 */
	return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_getattr, env, obj, attr);
}

Loading