Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3211af11 authored by J. Bruce Fields
Browse files

svcrpc: cache deferral cleanup



Attempt to make obvious the first-try-sleeping-then-try-deferral logic
by putting that logic into a top-level function that calls helpers.

Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 6610f720
Loading
Loading
Loading
Loading
+79 −64
Original line number Diff line number Diff line
@@ -509,17 +509,6 @@ static LIST_HEAD(cache_defer_list);
static struct list_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};
static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	list_del_init(&dreq->recent);
@@ -537,29 +526,9 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
	list_add(&dreq->hash, &cache_defer_hash[hash]);
}

static int cache_defer_req(struct cache_req *req, struct cache_head *item)
static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
{
	struct cache_deferred_req *dreq, *discard;
	struct thread_deferred_req sleeper;

	if (cache_defer_cnt >= DFR_MAX) {
		/* too much in the cache, randomly drop this one,
		 * or continue and drop the oldest below
		 */
		if (net_random()&1)
			return -ENOMEM;
	}
	if (req->thread_wait) {
		dreq = &sleeper.handle;
		sleeper.completion =
			COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
		dreq->revisit = cache_restart_thread;
	} else
		dreq = req->defer(req);

 retry:
	if (dreq == NULL)
		return -ENOMEM;
	struct cache_deferred_req *discard;

	dreq->item = item;

@@ -585,8 +554,34 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
		cache_revisit_request(item);
		return -EAGAIN;
	}
	return 0;
}

/*
 * Deferral record embedded on the stack of a thread that chose to sleep
 * (see cache_wait_req()): the thread parks on @completion and is woken
 * when the revisit callback below fires.
 */
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

/* Revisit callback for a stack-embedded deferral: wake the sleeping owner. */
static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *sleeper;

	sleeper = container_of(dreq, struct thread_deferred_req, handle);
	complete(&sleeper->completion);
}

static int cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;
	int ret;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	ret = setup_deferral(dreq, item);
	if (ret)
		return ret;

	if (dreq == &sleeper.handle) {
	if (wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
@@ -610,8 +605,7 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
		/* item is still pending, try request
		 * deferral
		 */
			dreq = req->defer(req);
			goto retry;
		return -ETIMEDOUT;
	}
	/* only return success if we actually deferred the
	 * request.  In this case we waited until it was
@@ -620,7 +614,28 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
	 */
	return -EEXIST;
}
	return 0;

/*
 * Defer handling of @req until the pending cache entry @item is filled in.
 *
 * A thread that is allowed to wait (req->thread_wait != 0) first tries to
 * sleep for the update; only if that sleep times out do we fall back to an
 * asynchronous deferral via req->defer().
 *
 * Returns 0 when the request was deferred (or, for a sleeping thread, an
 * error/state code from cache_wait_req()), -ENOMEM when the request was
 * dropped or no deferral could be allocated.
 */
static int cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (cache_defer_cnt >= DFR_MAX) {
		/* Cache of deferrals is over the limit: randomly drop
		 * this one now, or continue and let setup_deferral()
		 * discard the oldest entry instead.
		 */
		if (net_random() & 1)
			return -ENOMEM;
	}

	if (req->thread_wait) {
		int ret = cache_wait_req(req, item);

		/* Any result other than a timeout is final; on timeout
		 * fall through to asynchronous deferral below.
		 */
		if (ret != -ETIMEDOUT)
			return ret;
	}

	dreq = req->defer(req);
	if (dreq == NULL)
		return -ENOMEM;
	return setup_deferral(dreq, item);
}

static void cache_revisit_request(struct cache_head *item)