
Commit ba3a3ce1 authored by Julian Anastasov, committed by Pablo Neira Ayuso

ipvs: convert sched_lock to spin lock



As all read_locks are gone, a plain spin lock is preferred.

Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
parent ed3ffc4e
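
Why the conversion is safe and worthwhile: an rwlock only pays off while some users take it for reading. Once every read_lock() on sched_lock is gone, the lock is always acquired exclusively, so a plain spin lock gives the same mutual exclusion with less overhead. The hunks below are mechanical substitutions: rwlock_t becomes spinlock_t, rwlock_init() becomes spin_lock_init(), and each write_lock()/write_unlock() pair (or the _bh variants, used on paths where bottom halves are not already disabled) becomes the matching spin_lock()/spin_unlock() call.

A minimal userspace sketch of the same before/after pattern, using pthread primitives rather than the kernel locking API; the names (sched_state, sched_state_advance) are illustrative only and do not appear in the patch:

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for the per-service scheduler state that sched_lock
 * protects (hypothetical, for illustration only). */
struct sched_state {
	pthread_spinlock_t lock;	/* was a pthread_rwlock_t while readers existed */
	int rover;			/* e.g. a round-robin position */
};

static void sched_state_init(struct sched_state *s)
{
	/* before: pthread_rwlock_init(&s->lock, NULL); */
	pthread_spin_init(&s->lock, PTHREAD_PROCESS_PRIVATE);
	s->rover = 0;
}

/* Every remaining caller updates the state, so exclusive locking is
 * all that is needed; there is no read side left to optimize for. */
static int sched_state_advance(struct sched_state *s, int nslots)
{
	int pos;

	/* before: pthread_rwlock_wrlock(&s->lock); */
	pthread_spin_lock(&s->lock);
	pos = s->rover;
	s->rover = (s->rover + 1) % nslots;
	pthread_spin_unlock(&s->lock);	/* before: pthread_rwlock_unlock() */
	return pos;
}

int main(void)
{
	struct sched_state s;

	sched_state_init(&s);
	for (int i = 0; i < 5; i++)
		printf("slot %d\n", sched_state_advance(&s, 3));
	pthread_spin_destroy(&s.lock);
	return 0;
}

Build with cc -pthread; the point is only that every access is exclusive, so nothing is lost by dropping the reader/writer distinction.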
include/net/ip_vs.h +1 −1
@@ -734,7 +734,7 @@ struct ip_vs_service {

	/* for scheduling */
	struct ip_vs_scheduler	*scheduler;    /* bound scheduler object */
-	rwlock_t		sched_lock;    /* lock sched_data */
+	spinlock_t		sched_lock;    /* lock sched_data */
	void			*sched_data;   /* scheduler application data */

	/* alternate persistence engine */
net/netfilter/ipvs/ip_vs_ctl.c +1 −1
@@ -1219,7 +1219,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
	svc->net = net;

	INIT_LIST_HEAD(&svc->destinations);
-	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->sched_lock);
	spin_lock_init(&svc->stats.lock);

	/* Bind the scheduler */
net/netfilter/ipvs/ip_vs_lblc.c +9 −9
@@ -194,7 +194,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,

/*
 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
- * address to a server. Called under write lock.
+ * address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
@@ -242,7 +242,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
	struct hlist_node *next;
	int i;

-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
	tbl->dead = 1;
	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
@@ -250,7 +250,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
			atomic_dec(&tbl->entries);
		}
	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblc_expiration(struct ip_vs_service *svc)
@@ -274,7 +274,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now,
					en->lastuse +
@@ -284,7 +284,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
			ip_vs_lblc_free(en);
			atomic_dec(&tbl->entries);
		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
@@ -330,7 +330,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLC_TAB_MASK;

-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;
@@ -339,7 +339,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
			atomic_dec(&tbl->entries);
			goal--;
		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
@@ -527,10 +527,10 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblc_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
net/netfilter/ipvs/ip_vs_lblcr.c +13 −13
@@ -368,7 +368,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,

/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
- * IP address to a server. Called under write lock.
+ * IP address to a server. Called under spin lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
@@ -412,14 +412,14 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
	struct ip_vs_lblcr_entry *en;
	struct hlist_node *next;

-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
	tbl->dead = 1;
	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
}

static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
@@ -443,7 +443,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       sysctl_lblcr_expiration(svc), now))
@@ -452,7 +452,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
@@ -498,7 +498,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
				continue;
@@ -507,7 +507,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
			atomic_dec(&tbl->entries);
			goal--;
		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
@@ -678,7 +678,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
				sysctl_lblcr_expiration(svc))) {
-			write_lock(&svc->sched_lock);
+			spin_lock(&svc->sched_lock);
			if (atomic_read(&en->set.size) > 1) {
				struct ip_vs_dest *m;

@@ -686,7 +686,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
				if (m)
					ip_vs_dest_set_erase(&en->set, m);
			}
-			write_unlock(&svc->sched_lock);
+			spin_unlock(&svc->sched_lock);
		}

		/* If the destination is not overloaded, use it */
@@ -701,10 +701,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
		}

		/* Update our cache entry */
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
		if (!tbl->dead)
			ip_vs_dest_set_insert(&en->set, dest, true);
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
		goto out;
	}

@@ -716,10 +716,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
	if (!tbl->dead)
		ip_vs_lblcr_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
net/netfilter/ipvs/ip_vs_rr.c +5 −5
@@ -39,14 +39,14 @@ static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)
{
	struct list_head *p;

-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
	p = (struct list_head *) svc->sched_data;
	/* dest is already unlinked, so p->prev is not valid but
	 * p->next is valid, use it to reach previous entry.
	 */
	if (p == &dest->n_list)
		svc->sched_data = p->next->prev;
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
	return 0;
}

@@ -63,7 +63,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
	p = (struct list_head *) svc->sched_data;
	last = dest = list_entry(p, struct ip_vs_dest, n_list);

@@ -85,13 +85,13 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
	} while (pass < 2 && p != &svc->destinations);

stop:
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
	ip_vs_scheduler_err(svc, "no destination available");
	return NULL;

  out:
	svc->sched_data = &dest->n_list;
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
	IP_VS_DBG_BUF(6, "RR: server %s:%u "
		      "activeconns %d refcnt %d weight %d\n",
		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),