Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cce3c2da authored by Oleg Drokin, committed by Greg Kroah-Hartman
Browse files

staging/lustre/obdclass: Adjust NULL comparison codestyle



All instances of "x == NULL" are changed to "!x" and
"x != NULL" to "x"

Also remove some redundant assertions.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent eb17cc24
Loading
Loading
Loading
Loading
+5 −5
Original line number Diff line number Diff line
@@ -104,7 +104,7 @@ static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
		return old_size;

	new = kmemdup(*header, new_size, GFP_NOFS);
	if (unlikely(new == NULL))
	if (unlikely(!new))
		return -ENOMEM;

	kfree(*header);
@@ -124,7 +124,7 @@ static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
		return 0;

	new = kmemdup(*header, ext_size, GFP_NOFS);
	if (unlikely(new == NULL))
	if (unlikely(!new))
		return -ENOMEM;

	kfree(*header);
@@ -149,7 +149,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
		count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
	esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
	new = kzalloc(esize, GFP_NOFS);
	if (unlikely(new == NULL))
	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

	new->a_count = cpu_to_le32(count);
@@ -180,7 +180,7 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
		return -EINVAL;

	new = kzalloc(size, GFP_NOFS);
	if (unlikely(new == NULL))
	if (unlikely(!new))
		return -ENOMEM;

	new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
@@ -300,7 +300,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
	ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);

	new = kzalloc(ext_size, GFP_NOFS);
	if (unlikely(new == NULL))
	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

	for (i = 0, j = 0; i < posix_count; i++) {
+41 −42
Original line number Diff line number Diff line
@@ -93,7 +93,7 @@ static int cl_io_invariant(const struct cl_io *io)
		 * CIS_IO_GOING.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up != NULL));
		     (io->ci_state == CIS_LOCKED && up));
}

/**
@@ -111,7 +111,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
				     cis_linkage);
		list_del_init(&slice->cis_linkage);
		if (slice->cis_iop->op[io->ci_type].cio_fini != NULL)
		if (slice->cis_iop->op[io->ci_type].cio_fini)
			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
		/*
		 * Invalidate slice to catch use after free. This assumes that
@@ -164,7 +164,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,

	result = 0;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_io_init != NULL) {
		if (scan->co_ops->coo_io_init) {
			result = scan->co_ops->coo_io_init(env, scan, io);
			if (result != 0)
				break;
@@ -186,7 +186,7 @@ int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj != cl_object_top(obj));
	if (info->clt_current_io == NULL)
	if (!info->clt_current_io)
		info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
@@ -208,7 +208,7 @@ int cl_io_init(const struct lu_env *env, struct cl_io *io,
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj == cl_object_top(obj));
	LASSERT(info->clt_current_io == NULL);
	LASSERT(!info->clt_current_io);

	info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
@@ -224,7 +224,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
		  enum cl_io_type iot, loff_t pos, size_t count)
{
	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
	LINVRNT(io->ci_obj != NULL);
	LINVRNT(io->ci_obj);

	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
			 "io range: %u [%llu, %llu) %u %u\n",
@@ -292,7 +292,7 @@ static void cl_io_locks_sort(struct cl_io *io)
		list_for_each_entry_safe(curr, temp,
					     &io->ci_lockset.cls_todo,
					     cill_linkage) {
			if (prev != NULL) {
			if (prev) {
				switch (cl_lock_descr_sort(&prev->cill_descr,
							  &curr->cill_descr)) {
				case 0:
@@ -399,11 +399,11 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
	struct cl_lock *lock = link->cill_lock;

	list_del_init(&link->cill_linkage);
	if (lock != NULL) {
	if (lock) {
		cl_lock_release(env, lock, "io", io);
		link->cill_lock = NULL;
	}
	if (link->cill_fini != NULL)
	if (link->cill_fini)
		link->cill_fini(env, link);
}

@@ -458,7 +458,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
		if (!scan->cis_iop->op[io->ci_type].cio_lock)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
		if (result != 0)
@@ -503,7 +503,7 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
		cl_lock_link_fini(env, io, link);
	}
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_unlock != NULL)
		if (scan->cis_iop->op[io->ci_type].cio_unlock)
			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
	}
	io->ci_state = CIS_UNLOCKED;
@@ -529,7 +529,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)

	result = 0;
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
		if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
								      scan);
@@ -556,7 +556,7 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
	}
	io->ci_state = CIS_IT_ENDED;
@@ -581,7 +581,7 @@ static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,

	/* layers have to be notified. */
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_advance != NULL)
		if (scan->cis_iop->op[io->ci_type].cio_advance)
			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
								   nob);
	}
@@ -621,7 +621,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
	int result;

	link = kzalloc(sizeof(*link), GFP_NOFS);
	if (link != NULL) {
	if (link) {
		link->cill_descr     = *descr;
		link->cill_fini      = cl_free_io_lock_link;
		result = cl_io_lock_add(env, io, link);
@@ -648,7 +648,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)

	io->ci_state = CIS_IO_GOING;
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_start == NULL)
		if (!scan->cis_iop->op[io->ci_type].cio_start)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
		if (result != 0)
@@ -673,7 +673,7 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
		if (scan->cis_iop->op[io->ci_type].cio_end)
			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
		/* TODO: error handling. */
	}
@@ -687,7 +687,7 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
	const struct cl_page_slice *slice;

	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
	LINVRNT(slice != NULL);
	LINVRNT(slice);
	return slice;
}

@@ -759,11 +759,11 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
	 */
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_read_page != NULL) {
		if (scan->cis_iop->cio_read_page) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			LINVRNT(slice != NULL);
			LINVRNT(slice);
			result = scan->cis_iop->cio_read_page(env, scan, slice);
			if (result != 0)
				break;
@@ -798,7 +798,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
	LASSERT(cl_page_in_io(page, io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->cio_prepare_write != NULL) {
		if (scan->cis_iop->cio_prepare_write) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
@@ -833,11 +833,11 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
	 * state. Better (and more general) way of dealing with such situation
	 * is needed.
	 */
	LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
	LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
	LASSERT(cl_page_in_io(page, io));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_commit_write != NULL) {
		if (scan->cis_iop->cio_commit_write) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
@@ -872,7 +872,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
	LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));

	cl_io_for_each(scan, io) {
		if (scan->cis_iop->req_op[crt].cio_submit == NULL)
		if (!scan->cis_iop->req_op[crt].cio_submit)
			continue;
		result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
							       queue);
@@ -900,7 +900,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
	int rc;

	cl_page_list_for_each(pg, &queue->c2_qin) {
		LASSERT(pg->cp_sync_io == NULL);
		LASSERT(!pg->cp_sync_io);
		pg->cp_sync_io = anchor;
	}

@@ -1026,7 +1026,7 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
{
	struct list_head *linkage = &slice->cis_linkage;

	LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
	LASSERT((!linkage->prev && !linkage->next) ||
		list_empty(linkage));

	list_add_tail(linkage, &io->ci_layers);
@@ -1054,7 +1054,7 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here. */
	LASSERT(page->cp_owner != NULL);
	LASSERT(page->cp_owner);
	LINVRNT(plist->pl_owner == current);

	lockdep_off();
@@ -1263,7 +1263,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
	while (io->ci_parent != NULL)
	while (io->ci_parent)
		io = io->ci_parent;
	return io;
}
@@ -1296,13 +1296,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
	LASSERT(list_empty(&req->crq_pages));
	LASSERT(req->crq_nrpages == 0);
	LINVRNT(list_empty(&req->crq_layers));
	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));

	if (req->crq_o != NULL) {
	if (req->crq_o) {
		for (i = 0; i < req->crq_nrobjs; ++i) {
			struct cl_object *obj = req->crq_o[i].ro_obj;

			if (obj != NULL) {
			if (obj) {
				lu_object_ref_del_at(&obj->co_lu,
						     &req->crq_o[i].ro_obj_ref,
						     "cl_req", req);
@@ -1326,7 +1326,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
	do {
		list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
			dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
			if (dev->cd_ops->cdo_req_init != NULL) {
			if (dev->cd_ops->cdo_req_init) {
				result = dev->cd_ops->cdo_req_init(env,
								   dev, req);
				if (result != 0)
@@ -1334,7 +1334,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
			}
		}
		page = page->cp_child;
	} while (page != NULL && result == 0);
	} while (page && result == 0);
	return result;
}

@@ -1353,7 +1353,7 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
		slice = list_entry(req->crq_layers.prev,
				       struct cl_req_slice, crs_linkage);
		list_del_init(&slice->crs_linkage);
		if (slice->crs_ops->cro_completion != NULL)
		if (slice->crs_ops->cro_completion)
			slice->crs_ops->cro_completion(env, slice, rc);
	}
	cl_req_free(env, req);
@@ -1371,7 +1371,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
	LINVRNT(nr_objects > 0);

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (req != NULL) {
	if (req) {
		int result;

		req->crq_type = crt;
@@ -1380,7 +1380,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,

		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
				     GFP_NOFS);
		if (req->crq_o != NULL) {
		if (req->crq_o) {
			req->crq_nrobjs = nr_objects;
			result = cl_req_init(env, req, page);
		} else
@@ -1408,7 +1408,7 @@ void cl_req_page_add(const struct lu_env *env,
	page = cl_page_top(page);

	LASSERT(list_empty(&page->cp_flight));
	LASSERT(page->cp_req == NULL);
	LASSERT(!page->cp_req);

	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
		      req, req->crq_type, req->crq_nrpages);
@@ -1418,7 +1418,7 @@ void cl_req_page_add(const struct lu_env *env,
	page->cp_req = req;
	obj = cl_object_top(page->cp_obj);
	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
		if (rqo->ro_obj == NULL) {
		if (!rqo->ro_obj) {
			rqo->ro_obj = obj;
			cl_object_get(obj);
			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
@@ -1463,11 +1463,11 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
	 * of objects.
	 */
	for (i = 0; i < req->crq_nrobjs; ++i)
		LASSERT(req->crq_o[i].ro_obj != NULL);
		LASSERT(req->crq_o[i].ro_obj);

	result = 0;
	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
		if (slice->crs_ops->cro_prep != NULL) {
		if (slice->crs_ops->cro_prep) {
			result = slice->crs_ops->cro_prep(env, slice);
			if (result != 0)
				break;
@@ -1501,9 +1501,8 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,

			scan = cl_page_at(page,
					  slice->crs_dev->cd_lu_dev.ld_type);
			LASSERT(scan != NULL);
			obj = scan->cpl_obj;
			if (slice->crs_ops->cro_attr_set != NULL)
			if (slice->crs_ops->cro_attr_set)
				slice->crs_ops->cro_attr_set(env, slice, obj,
							     attr + i, flags);
		}
+23 −24
Original line number Diff line number Diff line
@@ -96,7 +96,7 @@ static int cl_lock_invariant(const struct lu_env *env,

	result = atomic_read(&lock->cll_ref) > 0 &&
		cl_lock_invariant_trusted(env, lock);
	if (!result && env != NULL)
	if (!result && env)
		CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
	return result;
}
@@ -288,7 +288,7 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)

	LINVRNT(cl_lock_invariant(env, lock));
	obj = lock->cll_descr.cld_obj;
	LINVRNT(obj != NULL);
	LINVRNT(obj);

	CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
	       atomic_read(&lock->cll_ref), lock, RETIP);
@@ -362,7 +362,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
	struct lu_object_header *head;

	lock = kmem_cache_alloc(cl_lock_kmem, GFP_NOFS | __GFP_ZERO);
	if (lock != NULL) {
	if (lock) {
		atomic_set(&lock->cll_ref, 1);
		lock->cll_descr = *descr;
		lock->cll_state = CLS_NEW;
@@ -461,7 +461,7 @@ static int cl_lock_fits_into(const struct lu_env *env,

	LINVRNT(cl_lock_invariant_trusted(env, lock));
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_fits_into != NULL &&
		if (slice->cls_ops->clo_fits_into &&
		    !slice->cls_ops->clo_fits_into(env, slice, need, io))
			return 0;
	}
@@ -524,14 +524,14 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
	lock = cl_lock_lookup(env, obj, io, need);
	spin_unlock(&head->coh_lock_guard);

	if (lock == NULL) {
	if (!lock) {
		lock = cl_lock_alloc(env, obj, io, need);
		if (!IS_ERR(lock)) {
			struct cl_lock *ghost;

			spin_lock(&head->coh_lock_guard);
			ghost = cl_lock_lookup(env, obj, io, need);
			if (ghost == NULL) {
			if (!ghost) {
				cl_lock_get_trust(lock);
				list_add_tail(&lock->cll_linkage,
						  &head->coh_locks);
@@ -572,7 +572,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
		spin_lock(&head->coh_lock_guard);
		lock = cl_lock_lookup(env, obj, io, need);
		spin_unlock(&head->coh_lock_guard);
		if (lock == NULL)
		if (!lock)
			return NULL;

		cl_lock_mutex_get(env, lock);
@@ -584,7 +584,7 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
			cl_lock_put(env, lock);
			lock = NULL;
		}
	} while (lock == NULL);
	} while (!lock);

	cl_lock_hold_add(env, lock, scope, source);
	cl_lock_user_add(env, lock);
@@ -775,7 +775,7 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
		lock->cll_flags |= CLF_CANCELLED;
		list_for_each_entry_reverse(slice, &lock->cll_layers,
						cls_linkage) {
			if (slice->cls_ops->clo_cancel != NULL)
			if (slice->cls_ops->clo_cancel)
				slice->cls_ops->clo_cancel(env, slice);
		}
	}
@@ -812,7 +812,7 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
		 */
		list_for_each_entry_reverse(slice, &lock->cll_layers,
						cls_linkage) {
			if (slice->cls_ops->clo_delete != NULL)
			if (slice->cls_ops->clo_delete)
				slice->cls_ops->clo_delete(env, slice);
		}
		/*
@@ -974,7 +974,7 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
	LINVRNT(cl_lock_invariant(env, lock));

	list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
		if (slice->cls_ops->clo_state != NULL)
		if (slice->cls_ops->clo_state)
			slice->cls_ops->clo_state(env, slice, state);
	wake_up_all(&lock->cll_wq);
}
@@ -1039,7 +1039,7 @@ static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
		result = -ENOSYS;
		list_for_each_entry_reverse(slice, &lock->cll_layers,
						cls_linkage) {
			if (slice->cls_ops->clo_unuse != NULL) {
			if (slice->cls_ops->clo_unuse) {
				result = slice->cls_ops->clo_unuse(env, slice);
				if (result != 0)
					break;
@@ -1072,7 +1072,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
	result = -ENOSYS;
	state = cl_lock_intransit(env, lock);
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_use != NULL) {
		if (slice->cls_ops->clo_use) {
			result = slice->cls_ops->clo_use(env, slice);
			if (result != 0)
				break;
@@ -1125,7 +1125,7 @@ static int cl_enqueue_kick(const struct lu_env *env,

	result = -ENOSYS;
	list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_enqueue != NULL) {
		if (slice->cls_ops->clo_enqueue) {
			result = slice->cls_ops->clo_enqueue(env,
							     slice, io, flags);
			if (result != 0)
@@ -1215,7 +1215,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,

	LASSERT(cl_lock_is_mutexed(lock));
	LASSERT(lock->cll_state == CLS_QUEUING);
	LASSERT(lock->cll_conflict != NULL);
	LASSERT(lock->cll_conflict);

	conflict = lock->cll_conflict;
	lock->cll_conflict = NULL;
@@ -1258,7 +1258,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
	do {
		result = cl_enqueue_try(env, lock, io, enqflags);
		if (result == CLO_WAIT) {
			if (lock->cll_conflict != NULL)
			if (lock->cll_conflict)
				result = cl_lock_enqueue_wait(env, lock, 1);
			else
				result = cl_lock_state_wait(env, lock);
@@ -1416,7 +1416,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)

		result = -ENOSYS;
		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
			if (slice->cls_ops->clo_wait != NULL) {
			if (slice->cls_ops->clo_wait) {
				result = slice->cls_ops->clo_wait(env, slice);
				if (result != 0)
					break;
@@ -1487,7 +1487,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)

	pound = 0;
	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_weigh != NULL) {
		if (slice->cls_ops->clo_weigh) {
			ounce = slice->cls_ops->clo_weigh(env, slice);
			pound += ounce;
			if (pound < ounce) /* over-weight^Wflow */
@@ -1523,7 +1523,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
	LINVRNT(cl_lock_invariant(env, lock));

	list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
		if (slice->cls_ops->clo_modify != NULL) {
		if (slice->cls_ops->clo_modify) {
			result = slice->cls_ops->clo_modify(env, slice, desc);
			if (result != 0)
				return result;
@@ -1584,7 +1584,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
	result = cl_lock_enclosure(env, lock, closure);
	if (result == 0) {
		list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
			if (slice->cls_ops->clo_closure != NULL) {
			if (slice->cls_ops->clo_closure) {
				result = slice->cls_ops->clo_closure(env, slice,
								     closure);
				if (result != 0)
@@ -1820,7 +1820,6 @@ static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)

	dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
	slice = cl_page_at(page, dtype);
	LASSERT(slice != NULL);
	return slice->cpl_page->cp_index;
}

@@ -1840,7 +1839,7 @@ static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
		/* refresh non-overlapped index */
		tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
					lock, 1, 0);
		if (tmp != NULL) {
		if (tmp) {
			/* Cache the first-non-overlapped index so as to skip
			 * all pages within [index, clt_fn_index). This
			 * is safe because if tmp lock is canceled, it will
@@ -1950,7 +1949,7 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
	 * already destroyed (as otherwise they will be left unprotected).
	 */
	LASSERT(ergo(!cancel,
		     head->coh_tree.rnode == NULL && head->coh_pages == 0));
		     !head->coh_tree.rnode && head->coh_pages == 0));

	spin_lock(&head->coh_lock_guard);
	while (!list_empty(&head->coh_locks)) {
@@ -2194,7 +2193,7 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
		(*printer)(env, cookie, "    %s@%p: ",
			   slice->cls_obj->co_lu.lo_dev->ld_type->ldt_name,
			   slice);
		if (slice->cls_ops->clo_print != NULL)
		if (slice->cls_ops->clo_print)
			slice->cls_ops->clo_print(env, cookie, printer, slice);
		(*printer)(env, cookie, "\n");
	}
+19 −21
Original line number Diff line number Diff line
@@ -152,7 +152,7 @@ struct cl_object *cl_object_top(struct cl_object *o)
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent != NULL)
	while (hdr->coh_parent)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
@@ -217,7 +217,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get != NULL) {
		if (obj->co_ops->coo_attr_get) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
@@ -249,7 +249,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
	result = 0;
	list_for_each_entry_reverse(obj, &top->loh_layers,
					co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_set != NULL) {
		if (obj->co_ops->coo_attr_set) {
			result = obj->co_ops->coo_attr_set(env, obj, attr, v);
			if (result != 0) {
				if (result > 0)
@@ -280,7 +280,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
	result = 0;
	list_for_each_entry_reverse(obj, &top->loh_layers,
					co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse != NULL) {
		if (obj->co_ops->coo_glimpse) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
@@ -306,7 +306,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
	top = obj->co_lu.lo_header;
	result = 0;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set != NULL) {
		if (obj->co_ops->coo_conf_set) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
@@ -328,7 +328,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
	struct cl_object_header *hdr;

	hdr = cl_object_header(obj);
	LASSERT(hdr->coh_tree.rnode == NULL);
	LASSERT(!hdr->coh_tree.rnode);
	LASSERT(hdr->coh_pages == 0);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
@@ -541,7 +541,7 @@ static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(cle->ce_debug == NULL && cle->ce_owner == NULL);
	LASSERT(!cle->ce_debug && !cle->ce_owner);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
@@ -576,7 +576,7 @@ static int cl_env_hops_keycmp(const void *key, struct hlist_node *hn)
{
	struct cl_env *cle = cl_env_hops_obj(hn);

	LASSERT(cle->ce_owner != NULL);
	LASSERT(cle->ce_owner);
	return (key == cle->ce_owner);
}

@@ -610,7 +610,7 @@ static inline void cl_env_attach(struct cl_env *cle)
	if (cle) {
		int rc;

		LASSERT(cle->ce_owner == NULL);
		LASSERT(!cle->ce_owner);
		cle->ce_owner = (void *) (long) current->pid;
		rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
					 &cle->ce_node);
@@ -638,7 +638,7 @@ static int cl_env_store_init(void)
				      CFS_HASH_MAX_THETA,
				      &cl_env_hops,
				      CFS_HASH_RW_BKTLOCK);
	return cl_env_hash != NULL ? 0 : -ENOMEM;
	return cl_env_hash ? 0 : -ENOMEM;
}

static void cl_env_store_fini(void)
@@ -648,7 +648,7 @@ static void cl_env_store_fini(void)

static inline struct cl_env *cl_env_detach(struct cl_env *cle)
{
	if (cle == NULL)
	if (!cle)
		cle = cl_env_fetch();

	if (cle && cle->ce_owner)
@@ -663,7 +663,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
	struct cl_env *cle;

	cle = kmem_cache_alloc(cl_env_kmem, GFP_NOFS | __GFP_ZERO);
	if (cle != NULL) {
	if (cle) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
@@ -717,7 +717,7 @@ static struct lu_env *cl_env_peek(int *refcheck)

	env = NULL;
	cle = cl_env_fetch();
	if (cle != NULL) {
	if (cle) {
		CL_ENV_INC(hit);
		env = &cle->ce_lu;
		*refcheck = ++cle->ce_ref;
@@ -742,7 +742,7 @@ struct lu_env *cl_env_get(int *refcheck)
	struct lu_env *env;

	env = cl_env_peek(refcheck);
	if (env == NULL) {
	if (!env) {
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default,
				 __builtin_return_address(0));
@@ -769,7 +769,7 @@ struct lu_env *cl_env_alloc(int *refcheck, __u32 tags)
{
	struct lu_env *env;

	LASSERT(cl_env_peek(refcheck) == NULL);
	LASSERT(!cl_env_peek(refcheck));
	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;
@@ -784,7 +784,7 @@ EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
	LASSERT(cle->ce_owner == NULL);
	LASSERT(!cle->ce_owner);
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}
@@ -803,7 +803,7 @@ void cl_env_put(struct lu_env *env, int *refcheck)
	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck != NULL, cle->ce_ref == *refcheck));
	LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
@@ -878,7 +878,7 @@ struct lu_env *cl_env_nested_get(struct cl_env_nest *nest)

	nest->cen_cookie = NULL;
	env = cl_env_peek(&nest->cen_refcheck);
	if (env != NULL) {
	if (env) {
		if (!cl_io_is_going(env))
			return env;
		cl_env_put(env, &nest->cen_refcheck);
@@ -930,14 +930,12 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
	const char       *typename;
	struct lu_device *d;

	LASSERT(ldt != NULL);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site != NULL)
		if (site)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
+47 −50

File changed.

Preview size limit exceeded, changes collapsed.

Loading