Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 839049a8 authored by NeilBrown's avatar NeilBrown Committed by J. Bruce Fields
Browse files

nfsd/idmap: drop special request deferral in favour of improved default.



The idmap code manages request deferral by waiting for a reply from
userspace rather than putting the NFS request on a queue to be retried
from the start.
Now that the common deferral code does this, there is no need for the
special code in idmap.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 8ff30fa4
Loading
Loading
Loading
Loading
+11 −94
Original line number Diff line number Diff line
@@ -482,109 +482,26 @@ nfsd_idmap_shutdown(void)
	cache_unregister(&nametoid_cache);
}

/*
 * Deferred request handling
 */

/*
 * Per-lookup state used to wait in-place for an idmap upcall reply
 * rather than deferring the whole NFS request.  Reference-counted via
 * 'count' (see get_mdr()/put_mdr()); 'waitq' is woken from
 * idmap_revisit() when the deferred request is revisited.
 */
struct idmap_defer_req {
       struct cache_req		req;		/* passed to cache_check() */
       struct cache_deferred_req deferred_req;	/* revisit-callback hook */
       wait_queue_head_t	waitq;		/* waiter sleeps here */
       atomic_t			count;		/* reference count */
};

/* Release one reference on 'mdr'; the final put frees the structure. */
static inline void
put_mdr(struct idmap_defer_req *mdr)
{
	if (!atomic_dec_and_test(&mdr->count))
		return;
	kfree(mdr);
}

/* Take an additional reference on 'mdr'; released with put_mdr(). */
static inline void
get_mdr(struct idmap_defer_req *mdr)
{
	atomic_inc(&mdr->count);
}

/*
 * cache_deferred_req revisit callback: wake the waiter sleeping in
 * idmap_lookup() and drop the reference taken when the request was
 * deferred (see idmap_defer()).  'toomany' is deliberately ignored --
 * the waiter re-checks the cache state itself after waking.
 */
static void
idmap_revisit(struct cache_deferred_req *dreq, int toomany)
{
	struct idmap_defer_req *mdr =
		container_of(dreq, struct idmap_defer_req, deferred_req);

	wake_up(&mdr->waitq);
	put_mdr(mdr);
}

static struct cache_deferred_req *
idmap_defer(struct cache_req *req)
static int
idmap_lookup(struct svc_rqst *rqstp,
		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
		struct cache_detail *detail, struct ent **item)
{
	struct idmap_defer_req *mdr =
		container_of(req, struct idmap_defer_req, req);

	mdr->deferred_req.revisit = idmap_revisit;
	get_mdr(mdr);
	return (&mdr->deferred_req);
}
	int ret;

static inline int
do_idmap_lookup(struct ent *(*lookup_fn)(struct ent *), struct ent *key,
		struct cache_detail *detail, struct ent **item,
		struct idmap_defer_req *mdr)
{
	*item = lookup_fn(key);
	if (!*item)
		return -ENOMEM;
	return cache_check(detail, &(*item)->h, &mdr->req);
}

static inline int
do_idmap_lookup_nowait(struct ent *(*lookup_fn)(struct ent *),
			struct ent *key, struct cache_detail *detail,
			struct ent **item)
{
	int ret = -ENOMEM;
 retry:
	ret = cache_check(detail, &(*item)->h, &rqstp->rq_chandle);

	if (ret == -ETIMEDOUT) {
		struct ent *prev_item = *item;
		*item = lookup_fn(key);
	if (!*item)
		goto out_err;
	ret = -ETIMEDOUT;
	if (!test_bit(CACHE_VALID, &(*item)->h.flags)
			|| (*item)->h.expiry_time < seconds_since_boot()
			|| detail->flush_time > (*item)->h.last_refresh)
		goto out_put;
	ret = -ENOENT;
	if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
		goto out_put;
	return 0;
out_put:
		if (*item != prev_item)
			goto retry;
		cache_put(&(*item)->h, detail);
out_err:
	*item = NULL;
	return ret;
	}

/*
 * Resolve 'key' through the given cache, waiting (up to 1 second) for a
 * userspace idmap reply instead of deferring the NFS request.
 *
 * @rqstp:     the NFS request (not referenced in this body -- kept for
 *             interface compatibility with callers; TODO confirm)
 * @lookup_fn: cache lookup routine producing a referenced entry
 * @key:       template entry to look up
 * @detail:    the idtoname/nametoid cache
 * @item:      on success, the resolved, referenced cache entry
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from the cache lookup helpers.
 */
static int
idmap_lookup(struct svc_rqst *rqstp,
		struct ent *(*lookup_fn)(struct ent *), struct ent *key,
		struct cache_detail *detail, struct ent **item)
{
	struct idmap_defer_req *mdr;
	int ret;

	mdr = kzalloc(sizeof(*mdr), GFP_KERNEL);
	if (!mdr)
		return -ENOMEM;
	/* One reference for this function; dropped in put_mdr() below. */
	atomic_set(&mdr->count, 1);
	init_waitqueue_head(&mdr->waitq);
	mdr->req.defer = idmap_defer;
	ret = do_idmap_lookup(lookup_fn, key, detail, item, mdr);
	if (ret == -EAGAIN) {
		/*
		 * Upcall in progress: sleep until idmap_revisit() wakes
		 * us (or 1s elapses), then retry without deferring.
		 */
		wait_event_interruptible_timeout(mdr->waitq,
			test_bit(CACHE_VALID, &(*item)->h.flags), 1 * HZ);
		ret = do_idmap_lookup_nowait(lookup_fn, key, detail, item);
	}
	put_mdr(mdr);
	return ret;
}