
Commit 4c042a80 authored by David S. Miller

Merge branch 'bpf-misc-next'



Daniel Borkmann says:

====================
Misc BPF updates

This set cleans up ldimm64 leftovers from early eBPF days and
adds a couple of related test cases to the verifier test suite.
It also cleans up the kallsyms spinlock (I had the same patch
queued as well) by relaxing it to the _bh variant. Finally, it
fixes up test_progs with regard to htons/ntohs and adds accessor
macros for the percpu tests in test_maps.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cd5487fb f3515b5d
arch/arm64/net/bpf_jit_comp.c  +0 −9
@@ -604,15 +604,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		const struct bpf_insn insn1 = insn[1];
 		u64 imm64;
 
-		if (insn1.code != 0 || insn1.src_reg != 0 ||
-		    insn1.dst_reg != 0 || insn1.off != 0) {
-			/* Note: verifier in BPF core must catch invalid
-			 * instructions.
-			 */
-			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
-			return -EINVAL;
-		}
-
 		imm64 = (u64)insn1.imm << 32 | (u32)imm;
 		emit_a64_mov_i64(dst, imm64, ctx);
 
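
For context, BPF_LD_IMM64 is the only 16-byte eBPF instruction: it spans
two struct bpf_insn slots, with the low 32 bits of the immediate in the
first slot and the high 32 bits in a second, pseudo slot whose code,
src_reg, dst_reg and off fields must all be zero. The check removed above
duplicated validation the verifier already performs before any JIT sees
the program. A minimal sketch of that encoding (field names are from the
uapi struct bpf_insn; the helper itself is hypothetical):

	#include <linux/bpf.h>	/* struct bpf_insn, BPF_LD, BPF_IMM, BPF_DW */

	/* Hypothetical helper: build the two-slot BPF_LD_IMM64 pair whose
	 * second half the removed check used to re-validate. */
	static void emit_ld_imm64(struct bpf_insn *insn, __u8 dst_reg, __u64 imm64)
	{
		insn[0] = (struct bpf_insn) {
			.code	 = BPF_LD | BPF_DW | BPF_IMM,
			.dst_reg = dst_reg,
			.imm	 = (__u32)imm64,	/* low word */
		};
		insn[1] = (struct bpf_insn) {
			/* pseudo insn: everything zero except the high word */
			.imm	 = (__u32)(imm64 >> 32),
		};
	}
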
arch/x86/net/bpf_jit_comp.c  +0 −7
@@ -490,13 +490,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			break;
 
 		case BPF_LD | BPF_IMM | BPF_DW:
-			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
-			    insn[1].dst_reg != 0 || insn[1].off != 0) {
-				/* verifier must catch invalid insns */
-				pr_err("invalid BPF_LD_IMM64 insn\n");
-				return -EINVAL;
-			}
-
 			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
 			 * to save 7 bytes.
 			 */
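
The surviving comment refers to x86-64 encoding sizes: a movabs with a
full 64-bit immediate takes 10 bytes, while xor'ing the 32-bit low half
of the destination with itself takes 2 bytes (3 with a REX prefix for
the extended registers) and zero-extends the result to 64 bits, hence
the roughly 7-byte saving. Hand-assembled encodings for illustration,
not the JIT's actual emit helpers:

	/* movabs $0x0,%rax -- 10 bytes: REX.W, opcode, 8-byte immediate */
	static const unsigned char mov_rax_0[10] = {
		0x48, 0xb8, 0, 0, 0, 0, 0, 0, 0, 0,
	};
	/* xor %eax,%eax -- 2 bytes, leaves all 64 bits of %rax zeroed */
	static const unsigned char xor_eax_eax[2] = { 0x31, 0xc0 };
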
kernel/bpf/core.c  +4 −8
@@ -394,27 +394,23 @@ static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
 
 void bpf_prog_kallsyms_add(struct bpf_prog *fp)
 {
-	unsigned long flags;
-
 	if (!bpf_prog_kallsyms_candidate(fp) ||
 	    !capable(CAP_SYS_ADMIN))
 		return;
 
-	spin_lock_irqsave(&bpf_lock, flags);
+	spin_lock_bh(&bpf_lock);
 	bpf_prog_ksym_node_add(fp->aux);
-	spin_unlock_irqrestore(&bpf_lock, flags);
+	spin_unlock_bh(&bpf_lock);
 }
 
 void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 {
-	unsigned long flags;
-
 	if (!bpf_prog_kallsyms_candidate(fp))
 		return;
 
-	spin_lock_irqsave(&bpf_lock, flags);
+	spin_lock_bh(&bpf_lock);
 	bpf_prog_ksym_node_del(fp->aux);
-	spin_unlock_irqrestore(&bpf_lock, flags);
+	spin_unlock_bh(&bpf_lock);
 }
 
 static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
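
The irqsave/irqrestore variants save and restore the full local
interrupt state, which is only needed when a lock can be taken from
hard-IRQ context; the switch to the _bh variant implies bpf_lock never
is, so disabling bottom halves suffices and the on-stack flags variable
goes away. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);	/* stand-in for bpf_lock */
	static LIST_HEAD(demo_list);

	/* spin_lock_bh() only disables softirq processing on the local
	 * CPU -- cheaper than disabling all interrupts, and safe as long
	 * as demo_lock is never taken from hard-IRQ context. */
	static void demo_add(struct list_head *entry)
	{
		spin_lock_bh(&demo_lock);
		list_add(entry, &demo_list);
		spin_unlock_bh(&demo_lock);
	}
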
tools/testing/selftests/bpf/bpf_util.h  +26 −0
@@ -6,6 +6,25 @@
 #include <string.h>
 #include <errno.h>
 
+#include <asm/byteorder.h>
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define __bpf_ntohs(x)		__builtin_bswap16(x)
+# define __bpf_htons(x)		__builtin_bswap16(x)
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define __bpf_ntohs(x)		(x)
+# define __bpf_htons(x)		(x)
+#else
+# error "Fix your __BYTE_ORDER?!"
+#endif
+
+#define bpf_htons(x)				\
+	(__builtin_constant_p(x) ?		\
+	 __constant_htons(x) : __bpf_htons(x))
+#define bpf_ntohs(x)				\
+	(__builtin_constant_p(x) ?		\
+	 __constant_ntohs(x) : __bpf_ntohs(x))
+
 static inline unsigned int bpf_num_possible_cpus(void)
 {
 	static const char *fcpu = "/sys/devices/system/cpu/possible";
@@ -35,4 +54,11 @@ static inline unsigned int bpf_num_possible_cpus(void)
 	return possible_cpus;
 }
 
+#define __bpf_percpu_val_align	__attribute__((__aligned__(8)))
+
+#define BPF_DECLARE_PERCPU(type, name)				\
+	struct { type v; /* padding */ } __bpf_percpu_val_align	\
+		name[bpf_num_possible_cpus()]
+#define bpf_percpu(name, cpu) name[(cpu)].v
+
 #endif /* __BPF_UTIL__ */
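
The 8-byte alignment mirrors how the kernel copies percpu map values to
user space: each per-CPU slot is rounded up to 8 bytes, so wrapping the
value type in an aligned struct makes name[cpu] land on the right stride
for any value type. A sketch of how a test could use the macros (map_fd,
key and a syscall wrapper for map lookups are assumed):

	#include <linux/types.h>
	#include "bpf_util.h"

	/* Assumed: a libbpf-style wrapper around the bpf(2) syscall. */
	extern int bpf_map_lookup_elem(int fd, const void *key, void *value);

	/* Sum one key's value across all possible CPUs of a percpu map.
	 * BPF_DECLARE_PERCPU expands to a variable-length array, so it
	 * must live inside a function. */
	static long sum_percpu(int map_fd, __u32 key)
	{
		unsigned int i, nr_cpus = bpf_num_possible_cpus();
		BPF_DECLARE_PERCPU(long, values);
		long total = 0;

		if (bpf_map_lookup_elem(map_fd, &key, values))
			return -1;
		for (i = 0; i < nr_cpus; i++)
			total += bpf_percpu(values, i);
		return total;
	}
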
tools/testing/selftests/bpf/test_l4lb.c  +5 −6
@@ -19,9 +19,8 @@
 #include <linux/udp.h>
 #include "bpf_helpers.h"
 #include "test_iptunnel_common.h"
+#include "bpf_util.h"
 
-#define htons __builtin_bswap16
-#define ntohs __builtin_bswap16
 int _version SEC("version") = 1;
 
 static inline __u32 rol32(__u32 word, unsigned int shift)
@@ -355,7 +354,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
 		iph_len = sizeof(struct ipv6hdr);
 		protocol = ip6h->nexthdr;
 		pckt.proto = protocol;
-		pkt_bytes = ntohs(ip6h->payload_len);
+		pkt_bytes = bpf_ntohs(ip6h->payload_len);
 		off += iph_len;
 		if (protocol == IPPROTO_FRAGMENT) {
 			return TC_ACT_SHOT;
@@ -377,7 +376,7 @@ static __always_inline int process_packet(void *data, __u64 off, void *data_end,
 
 		protocol = iph->protocol;
 		pckt.proto = protocol;
-		pkt_bytes = ntohs(iph->tot_len);
+		pkt_bytes = bpf_ntohs(iph->tot_len);
 		off += IPV4_HDR_LEN_NO_OPT;
 
 		if (iph->frag_off & PCKT_FRAGMENTED)
@@ -464,9 +463,9 @@ int balancer_ingress(struct __sk_buff *ctx)
 	if (data + nh_off > data_end)
 		return TC_ACT_SHOT;
 	eth_proto = eth->eth_proto;
-	if (eth_proto == htons(ETH_P_IP))
+	if (eth_proto == bpf_htons(ETH_P_IP))
 		return process_packet(data, nh_off, data_end, false, ctx);
-	else if (eth_proto == htons(ETH_P_IPV6))
+	else if (eth_proto == bpf_htons(ETH_P_IPV6))
 		return process_packet(data, nh_off, data_end, true, ctx);
 	else
 		return TC_ACT_SHOT;
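
The replaced defines unconditionally byte-swapped, which is only correct
on little-endian targets; the bpf_htons()/bpf_ntohs() macros from
bpf_util.h are no-ops on big-endian and additionally fold compile-time
constants through __constant_htons(), so a comparison like the one above
becomes a test against an immediate. A small sketch of the usage (the
function is hypothetical, assuming bpf_util.h is on the include path):

	#include <linux/if_ether.h>	/* ETH_P_IP */
	#include "bpf_util.h"

	/* eth_proto_net is in network byte order, as read from an Ethernet
	 * header. The old '#define htons __builtin_bswap16' made this check
	 * wrong on big-endian hosts; bpf_htons() is correct on both and
	 * folds the ETH_P_IP swap at compile time. */
	static int is_ipv4(unsigned short eth_proto_net)
	{
		return eth_proto_net == bpf_htons(ETH_P_IP);
	}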