
Commit fc9d9b37 authored by Alexander Grund

Update WireGuard to v1.0.20211208

Change-Id: I57572439c45826cc126737874bd1b5a587f5e726
parent 92d854cc
compat-asm.h  +3 −3
@@ -15,14 +15,14 @@
#define ISRHEL7
#elif RHEL_MAJOR == 8
#define ISRHEL8
-#if RHEL_MINOR >= 4
+#if RHEL_MINOR >= 6
#define ISCENTOS8S
#endif
#endif
#endif

/* PaX compatibility */
-#if defined(RAP_PLUGIN)
+#if defined(RAP_PLUGIN) && defined(RAP_ENTRY)
#undef ENTRY
#define ENTRY RAP_ENTRY
#endif
@@ -51,7 +51,7 @@
#undef pull
#endif

-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISCENTOS8S)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 76) && !defined(ISRHEL8) && !defined(SYM_FUNC_START)
#define SYM_FUNC_START ENTRY
#define SYM_FUNC_END ENDPROC
#endif
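
Note on the last hunk above: kernels from 5.4.76 on, as well as RHEL 8 backports, already define the SYM_FUNC_START/SYM_FUNC_END assembler annotations, so the guard now keys on !defined(SYM_FUNC_START) instead of guessing per distro. A minimal sketch of what the mapping provides on old kernels, with a made-up symbol name (not from this diff):

#include <linux/linkage.h>

/* Sketch: on pre-5.4.76 kernels the shim expands these to the classic
 * ENTRY/ENDPROC pair, so the same .S source assembles everywhere. */
SYM_FUNC_START(demo_compress_block)
	ret
SYM_FUNC_END(demo_compress_block)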
compat.h  +63 −4
@@ -16,7 +16,7 @@
#define ISRHEL7
#elif RHEL_MAJOR == 8
#define ISRHEL8
-#if RHEL_MINOR >= 4
+#if RHEL_MINOR >= 6
#define ISCENTOS8S
#endif
#endif
@@ -515,6 +515,28 @@ static inline void __compat_kvfree(const void *addr)
#define kvfree __compat_kvfree
#endif

+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+static inline void *__compat_kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	if (n != 0 && SIZE_MAX / n < size)
+		return NULL;
+	return kvmalloc(n * size, flags);
+}
+#define kvmalloc_array __compat_kvmalloc_array
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+static inline void *__compat_kvcalloc(size_t n, size_t size, gfp_t flags)
+{
+	return kvmalloc_array(n, size, flags | __GFP_ZERO);
+}
+#define kvcalloc __compat_kvcalloc
+#endif
+
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 9)
#include <linux/netdevice.h>
#define priv_destructor destructor
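
For context, the two blocks added above backport kvmalloc_array() (mainline 4.12) and kvcalloc() (mainline 4.18). The SIZE_MAX / n < size test is the point of the shim: it refuses element counts whose total size would wrap, instead of quietly allocating a short buffer. A hedged usage sketch with a made-up element type:

#include <linux/types.h>
#include <linux/mm.h>

struct demo_entry { u64 key; u64 value; };	/* illustrative only */

static struct demo_entry *demo_alloc_table(size_t n)
{
	/* Overflow-checked and zeroed; returns NULL rather than wrapping
	 * n * sizeof(struct demo_entry). Free with kvfree(). */
	return kvcalloc(n, sizeof(struct demo_entry), GFP_KERNEL);
}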
@@ -757,7 +779,7 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
#define hlist_add_behind(a, b) hlist_add_after(b, a)
#endif

-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISCENTOS8S)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) && !defined(ISRHEL8)
#define totalram_pages() totalram_pages
#endif

@@ -831,10 +853,16 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#endif

-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8)
#include <net/netlink.h>
+#ifndef NLA_POLICY_EXACT_LEN
#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_UNSPEC, .len = _len }
+#endif
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8)
+#include <net/netlink.h>
+#ifndef NLA_POLICY_MIN_LEN
#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_UNSPEC, .len = _len }
+#endif
#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY
#endif

@@ -849,7 +877,7 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#endif
#endif

-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) && !defined(ISRHEL8)
#define genl_dumpit_info(cb) ({ \
	struct { struct nlattr **attrs; } *a = (void *)((u8 *)cb->args + offsetofend(struct dump_ctx, next_allowedip)); \
	BUILD_BUG_ON(sizeof(cb->args) < offsetofend(struct dump_ctx, next_allowedip) + sizeof(*a)); \
@@ -1096,6 +1124,37 @@ static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tun
#endif
#endif

+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0)
+#include <net/dst_cache.h>
+struct dst_cache_pcpu {
+	unsigned long refresh_ts;
+	struct dst_entry *dst;
+	u32 cookie;
+	union {
+		struct in_addr in_saddr;
+		struct in6_addr in6_saddr;
+	};
+};
+#define COMPAT_HAS_DEFINED_DST_CACHE_PCPU
+static inline void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+	int i;
+
+	if (!dst_cache->cache)
+		return;
+
+	dst_cache->reset_ts = jiffies;
+	for_each_possible_cpu(i) {
+		struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+		struct dst_entry *dst = idst->dst;
+
+		idst->cookie = 0;
+		idst->dst = NULL;
+		dst_release(dst);
+	}
+}
+#endif
+
#if defined(ISUBUNTU1604) || defined(ISRHEL7)
#include <linux/siphash.h>
#ifndef _WG_LINUX_SIPHASH_H
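
The large block above backports dst_cache_reset_now() for kernels before 5.16. Unlike dst_cache_reset(), which only bumps reset_ts so stale entries die lazily at the next lookup, this variant walks every per-CPU slot and releases the cached dst immediately; that is why it needs the struct dst_cache_pcpu layout, and why COMPAT_HAS_DEFINED_DST_CACHE_PCPU tells the bundled dst_cache.c (below) not to define the struct a second time. A hedged sketch of the kind of caller this enables; the names are illustrative, not from this commit:

#include <net/dst_cache.h>

/* Drop cached routes right away, e.g. when the netns owning the transit
 * device is going down and must not stay pinned by stale dst entries. */
static void demo_flush_endpoint_cache(struct dst_cache *cache)
{
	dst_cache_reset_now(cache);
}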
@@ -1127,7 +1186,7 @@ static const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tun
#undef __read_mostly
#define __read_mostly
#endif
-#if (defined(RAP_PLUGIN) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+#if (defined(CONFIG_PAX) || defined(CONFIG_CFI_CLANG)) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
#include <linux/timer.h>
#define wg_expired_retransmit_handshake(a) wg_expired_retransmit_handshake(unsigned long timer)
#define wg_expired_send_keepalive(a) wg_expired_send_keepalive(unsigned long timer)
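
On the final hunk: before Linux 4.15, timer callbacks received an unsigned long cookie rather than a struct timer_list *, and these macros rewrite the handler prototypes to bridge the two. Casting function pointers between mismatched signatures is exactly what PaX RAP and clang CFI reject, and the guard presumably widens from RAP_PLUGIN to CONFIG_PAX to catch PaX kernels that do not expose the plugin macro. A sketch of the modern (4.15+) shape being emulated; the struct and field names are assumptions, not code from this diff:

#include <linux/timer.h>

struct demo_peer {
	struct timer_list timer_send_keepalive;
};

static void demo_expired_send_keepalive(struct timer_list *timer)
{
	/* from_timer() recovers the enclosing struct from the member. */
	struct demo_peer *peer = from_timer(peer, timer, timer_send_keepalive);

	(void)peer;	/* queue the keepalive here */
}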
dst_cache.c  +2 −0
@@ -27,6 +27,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
#endif
#include <uapi/linux/in.h>

+#ifndef COMPAT_HAS_DEFINED_DST_CACHE_PCPU
struct dst_cache_pcpu {
	unsigned long refresh_ts;
	struct dst_entry *dst;
@@ -36,6 +37,7 @@ struct dst_cache_pcpu {
		struct in6_addr in6_saddr;
	};
};
+#endif

static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
				      struct dst_entry *dst, u32 cookie)
siphash.h  +4 −10
@@ -22,9 +22,7 @@ typedef struct {
} siphash_key_t;

u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif

u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -77,10 +75,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
			  const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
		return __siphash_unaligned(data, len, key);
-#endif
	return ___siphash_aligned(data, len, key);
}

@@ -91,10 +88,8 @@ typedef struct {

u32 __hsiphash_aligned(const void *data, size_t len,
		       const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key);
-#endif

u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -130,10 +125,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
			   const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+	    !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
		return __hsiphash_unaligned(data, len, key);
-#endif
	return ___hsiphash_aligned(data, len, key);
}
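
The shape of both rewritten inlines above is the interesting part: IS_ENABLED() folds to a constant 0 or 1 at compile time, so on configurations with efficient unaligned access the condition is always true, every call resolves to the _unaligned variant (which is why its declaration lost the #ifndef), and the aligned path can be discarded by the compiler. A reduced sketch of the same dispatch, stripped of the siphash specifics:

#include <linux/kconfig.h>
#include <linux/kernel.h>

/* Illustrative only: one arm of this test vanishes in each config. */
static inline bool demo_use_unaligned(const void *p)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
	       !IS_ALIGNED((unsigned long)p, 8);
}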

siphash.c  +24 −24
@@ -57,6 +57,7 @@
	SIPROUND; \
	return (v0 ^ v1) ^ (v2 ^ v3);

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));
@@ -76,19 +77,19 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; fallthrough;
+	case 6: b |= ((u64)end[5]) << 40; fallthrough;
+	case 5: b |= ((u64)end[4]) << 32; fallthrough;
	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; fallthrough;
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
#endif
	POSTAMBLE
}
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));
@@ -108,18 +109,17 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; fallthrough;
+	case 6: b |= ((u64)end[5]) << 40; fallthrough;
+	case 5: b |= ((u64)end[4]) << 32; fallthrough;
	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; fallthrough;
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
#endif
	POSTAMBLE
}
-#endif

/**
 * siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
	HSIPROUND; \
	return (v0 ^ v1) ^ (v2 ^ v3);

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));
@@ -268,19 +269,19 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
						  bytemask_from_count(left)));
#else
	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; fallthrough;
+	case 6: b |= ((u64)end[5]) << 40; fallthrough;
+	case 5: b |= ((u64)end[4]) << 32; fallthrough;
	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; fallthrough;
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
#endif
	HPOSTAMBLE
}
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key)
{
@@ -300,18 +301,17 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
						  bytemask_from_count(left)));
#else
	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; fallthrough;
+	case 6: b |= ((u64)end[5]) << 40; fallthrough;
+	case 5: b |= ((u64)end[4]) << 32; fallthrough;
	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; fallthrough;
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
#endif
	HPOSTAMBLE
}
-#endif

/**
 * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -412,6 +412,7 @@ u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
	HSIPROUND; \
	return v1 ^ v3;

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u32));
@@ -425,14 +426,14 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
		v0 ^= m;
	}
	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; fallthrough;
	case 2: b |= le16_to_cpup(data); break;
	case 1: b |= end[0];
	}
	HPOSTAMBLE
}
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key)
{
@@ -447,13 +448,12 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
		v0 ^= m;
	}
	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; fallthrough;
	case 2: b |= get_unaligned_le16(end); break;
	case 1: b |= end[0];
	}
	HPOSTAMBLE
}
-#endif

/**
 * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
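
A closing note on the fallthrough annotations sprinkled through this file: with -Wimplicit-fallthrough, a case that deliberately drops into the next one must say so, and the fallthrough pseudo-keyword (from linux/compiler_attributes.h, expanding to __attribute__((__fallthrough__)) where the compiler supports it) is that annotation. A self-contained sketch in the same style as the switches above; the helper name is made up:

#include <linux/types.h>
#include <linux/compiler_attributes.h>

/* Illustrative only: fold up to three trailing bytes, as above. */
static u32 demo_fold_tail(const u8 *end, size_t left, u32 b)
{
	switch (left) {
	case 3: b |= ((u32)end[2]) << 16; fallthrough;
	case 2: b |= ((u32)end[1]) << 8; fallthrough;
	case 1: b |= end[0];
	}
	return b;
}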