Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b1137468 authored by J. Bruce Fields, committed by Trond Myklebust
Browse files

lockd: define for_each_host{_safe} macros



We've got a lot of loops like this, and I find them a little easier to
read with the macros.  More such loops are coming.

Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
[ cel: Forward-ported to 2.6.37 ]
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent bf269551
Loading
Loading
Loading
Loading
+55 −52
Original line number Diff line number Diff line
@@ -26,6 +26,18 @@
#define NLM_HOST_COLLECT	(120 * HZ)

static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];

/*
 * Walk every nlm_host in the hash table: for each bucket @chain of
 * @table (NLM_HOST_NRHASH buckets), iterate the hlist via @host,
 * using @pos as the hlist_node cursor (pre-3.9 hlist iterator ABI).
 *
 * NOTE: expands to two nested for loops, so a 'break' in the body only
 * leaves the current bucket's hlist walk and moves on to the next
 * chain; use goto to abandon the whole traversal.  Callers must hold
 * nlm_host_mutex — TODO confirm against the call sites.
 */
#define for_each_host(host, pos, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry((host), (pos), (chain), h_hash)

/*
 * Same traversal as for_each_host(), but safe against removal of the
 * current entry: @next caches the following hlist_node before the body
 * runs, so the body may unhash/free @host (as nlm_gc_hosts() does).
 *
 * NOTE: like for_each_host(), this is two nested loops — 'break' only
 * exits the current bucket, not the whole table walk.
 */
#define for_each_host_safe(host, pos, next, chain, table) \
	for ((chain) = (table); \
	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
		hlist_for_each_entry_safe((host), (pos), (next), \
						(chain), h_hash)

static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);
@@ -453,8 +465,7 @@ void nlm_host_rebooted(const struct nlm_reboot *info)
	 * To avoid processing a host several times, we match the nsmstate.
	 */
again:	mutex_lock(&nlm_host_mutex);
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
	for_each_host(host, pos, chain, nlm_hosts) {
		if (host->h_nsmhandle == nsm
		 && host->h_nsmstate != info->state) {
			host->h_nsmstate = info->state;
@@ -476,7 +487,6 @@ again: mutex_lock(&nlm_host_mutex);
			goto again;
		}
	}
	}
	mutex_unlock(&nlm_host_mutex);
	nsm_release(nsm);
}
@@ -497,15 +507,13 @@ nlm_shutdown_hosts(void)

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash) {
	for_each_host(host, pos, chain, nlm_hosts) {
		host->h_expires = jiffies - 1;
		if (host->h_rpcclnt) {
			rpc_shutdown_client(host->h_rpcclnt);
			host->h_rpcclnt = NULL;
		}
	}
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
@@ -515,15 +523,13 @@ nlm_shutdown_hosts(void)
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
			hlist_for_each_entry(host, pos, chain, h_hash) {
		for_each_host(host, pos, chain, nlm_hosts) {
			dprintk("       %s (cnt %d use %d exp %ld)\n",
				host->h_name, atomic_read(&host->h_count),
				host->h_inuse, host->h_expires);
		}
	}
}
}

/*
 * Garbage collect any unused NLM hosts.
@@ -538,19 +544,17 @@ nlm_gc_hosts(void)
	struct nlm_host	*host;

	dprintk("lockd: host garbage collection\n");
	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry(host, pos, chain, h_hash)
	for_each_host(host, pos, chain, nlm_hosts)
		host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
	for_each_host_safe(host, pos, next, chain, nlm_hosts) {
		if (atomic_read(&host->h_count) || host->h_inuse
		 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
			dprintk("nlm_gc_hosts skipping %s "
				"(cnt %d use %d exp %ld)\n",
				host->h_name, atomic_read(&host->h_count),
				host->h_inuse, host->h_expires);
			continue;
@@ -561,7 +565,6 @@ nlm_gc_hosts(void)
		nlm_destroy_host(host);
		nrhosts--;
	}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}