Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c16526a7 authored by Simon Kirby, committed by Simon Horman
Browse files

ipvs: fix overflow on dest weight multiply



Schedulers such as lblc and lblcr require the weight to be as high as the
maximum number of active connections. In commit b552f7e3
("ipvs: unify the formula to estimate the overhead of processing
connections"), the consideration of inactconns and activeconns was cleaned
up to always count activeconns as 256 times more important than inactconns.
In cases where 3000 or more connections are expected, a weight of 3000 *
256 * 3000 connections overflows the 32-bit signed result used to determine
if rescheduling is required.

On amd64, this merely changes the multiply and comparison instructions to
64-bit. On x86, a 64-bit result is already present from imull, so only
a few more comparison instructions are emitted.

Signed-off-by: Simon Kirby <sim@hostway.ca>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
parent 61c5923a
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1649,7 +1649,7 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
/* CONFIG_IP_VS_NFCT */
#endif

static inline unsigned int
static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{
	/*
+2 −2
Original line number Diff line number Diff line
@@ -443,8 +443,8 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
+6 −6
Original line number Diff line number Diff line
@@ -200,8 +200,8 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		if (((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
@@ -246,8 +246,8 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
		dest = rcu_dereference_protected(e->dest, 1);
		doh = ip_vs_dest_conn_overhead(dest);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		if (((__s64)moh * atomic_read(&dest->weight) <
		     (__s64)doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
@@ -611,8 +611,8 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
+4 −4
Original line number Diff line number Diff line
@@ -40,7 +40,7 @@
#include <net/ip_vs.h>


static inline unsigned int
static inline int
ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
{
	/*
@@ -59,7 +59,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		  struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest, *least = NULL;
	unsigned int loh = 0, doh;
	int loh = 0, doh;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

@@ -92,8 +92,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		}

		if (!least ||
		    (loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))) {
		    ((__s64)loh * atomic_read(&dest->weight) >
		     (__s64)doh * atomic_read(&least->weight))) {
			least = dest;
			loh = doh;
		}
+4 −4
Original line number Diff line number Diff line
@@ -44,7 +44,7 @@
#include <net/ip_vs.h>


static inline unsigned int
static inline int
ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
{
	/*
@@ -63,7 +63,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		   struct ip_vs_iphdr *iph)
{
	struct ip_vs_dest *dest, *least;
	unsigned int loh, doh;
	int loh, doh;

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

@@ -99,8 +99,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = ip_vs_sed_dest_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
		if ((__s64)loh * atomic_read(&dest->weight) >
		    (__s64)doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
Loading