Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a0ef5e19 authored by Jeff Layton, committed by J. Bruce Fields
Browse files

nfsd: don't try to reuse an expired DRC entry off the list



Currently when we are processing a request, we try to scrape an expired
or over-limit entry off the list in preference to allocating a new one
from the slab.

This is unnecessarily complicated. Just use the slab layer.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 28303ca3
Loading
Loading
Loading
Loading
+4 −32
Original line number Diff line number Diff line
@@ -131,13 +131,6 @@ nfsd_reply_cache_alloc(void)
	return rp;
}

/*
 * Detach @rp from the DRC: unlink it from its hash chain and from the
 * LRU list.  The _init variants reinitialize the entry's own list
 * pointers, so a later hlist_del_init/list_del_init on the same entry
 * is harmless.
 * NOTE(review): presumably called with cache_lock held — confirm at
 * the callers (not visible in this hunk).
 */
static void
nfsd_reply_cache_unhash(struct svc_cacherep *rp)
{
	hlist_del_init(&rp->c_hash);
	list_del_init(&rp->c_lru);
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
@@ -416,22 +409,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)

	/*
	 * Since the common case is a cache miss followed by an insert,
	 * preallocate an entry. First, try to reuse the first entry on the LRU
	 * if it works, then go ahead and prune the LRU list.
	 * preallocate an entry.
	 */
	spin_lock(&cache_lock);
	if (!list_empty(&lru_head)) {
		rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
		if (nfsd_cache_entry_expired(rp) ||
		    num_drc_entries >= max_drc_entries) {
			nfsd_reply_cache_unhash(rp);
			prune_cache_entries();
			goto search_cache;
		}
	}

	/* No expired ones available, allocate a new one. */
	spin_unlock(&cache_lock);
	rp = nfsd_reply_cache_alloc();
	spin_lock(&cache_lock);
	if (likely(rp)) {
@@ -439,7 +418,9 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
		drc_mem_usage += sizeof(*rp);
	}

search_cache:
	/* go ahead and prune the cache */
	prune_cache_entries();

	found = nfsd_cache_search(rqstp, csum);
	if (found) {
		if (likely(rp))
@@ -453,15 +434,6 @@ search_cache:
		goto out;
	}

	/*
	 * We're keeping the one we just allocated. Are we now over the
	 * limit? Prune one off the tip of the LRU in trade for the one we
	 * just allocated if so.
	 */
	if (num_drc_entries >= max_drc_entries)
		nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
						struct svc_cacherep, c_lru));

	nfsdstats.rcmisses++;
	rqstp->rq_cacherep = rp;
	rp->c_state = RC_INPROG;