Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11174492 authored by NeilBrown's avatar NeilBrown Committed by J. Bruce Fields
Browse files

sunrpc/cache: change deferred-request hash table to use hlist.



Being a hash table, hlist is the best option.

There is currently some ugliness where we treat "->next == NULL" as
a special case to avoid having to initialise the whole array.
This change nicely gets rid of that case.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 2ed5282c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ struct cache_req {
 * delayed awaiting cache-fill
 */
struct cache_deferred_req {
	struct list_head	hash;	/* on hash chain */
	struct hlist_node	hash;	/* on hash chain */
	struct list_head	recent; /* on fifo */
	struct cache_head	*item;  /* cache item we wait on */
	void			*owner; /* we might need to discard all defered requests
+10 −18
Original line number Diff line number Diff line
@@ -506,13 +506,13 @@ EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferred-request bookkeeping, all protected by cache_defer_lock:
 *  - cache_defer_list: FIFO of all deferred requests (oldest last-added).
 *  - cache_defer_hash: per-cache-item chains so revisit can find waiters.
 *  - cache_defer_cnt:  current number of deferred requests.
 *
 * NOTE(review): the scraped diff showed both the old `struct list_head`
 * and new `struct hlist_head` declarations of cache_defer_hash with the
 * +/- markers stripped; only the hlist_head post-image line is kept here.
 * An hlist_head is a single pointer that is zero when empty, so the
 * static array needs no explicit initialisation.
 */
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

/*
 * Remove @dreq from both the FIFO (->recent) and its hash chain (->hash),
 * and drop the deferred-request count.  Caller must hold cache_defer_lock.
 *
 * hlist_del_init() leaves ->hash "unhashed", which callers test with
 * hlist_unhashed() to decide whether cleanup is still needed.
 *
 * NOTE(review): the scraped diff showed both the removed
 * list_del_init(&dreq->hash) and the added hlist_del_init(&dreq->hash)
 * with the +/- markers stripped; keeping both would delete the same
 * node twice.  Only the hlist post-image line is kept here.
 */
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	list_del_init(&dreq->recent);
	hlist_del_init(&dreq->hash);
	cache_defer_cnt--;
}

@@ -521,9 +521,7 @@ static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_he
	int hash = DFR_HASH(item);

	list_add(&dreq->recent, &cache_defer_list);
	if (cache_defer_hash[hash].next == NULL)
		INIT_LIST_HEAD(&cache_defer_hash[hash]);
	list_add(&dreq->hash, &cache_defer_hash[hash]);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static int setup_deferral(struct cache_deferred_req *dreq, struct cache_head *item)
@@ -588,7 +586,7 @@ static int cache_wait_req(struct cache_req *req, struct cache_head *item)
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!list_empty(&sleeper.handle.hash)) {
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
@@ -642,24 +640,18 @@ static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;

	struct list_head *lp;
	struct hlist_node *lp, *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	lp = cache_defer_hash[hash].next;
	if (lp) {
		while (lp != &cache_defer_hash[hash]) {
			dreq = list_entry(lp, struct cache_deferred_req, hash);
			lp = lp->next;
	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
		}
	}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {