Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1f499d6a authored by David S. Miller
Browse files

Merge branch 'filter-next'



Alexei Starovoitov says:

====================
internal BPF jit for x64 and JITed seccomp

Internal BPF JIT compiler for x86_64 replaces classic BPF JIT.
Use it in seccomp and in tracing filters (sent as separate patch)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9509b1c1 8f577cad
Loading
Loading
Loading
Loading
+18 −59
Original line number Diff line number Diff line
@@ -12,13 +12,16 @@

/*
 * Calling convention :
 * rdi : skb pointer
 * rbx : skb pointer (callee saved)
 * esi : offset of byte(s) to fetch in skb (can be scratched)
 * r8  : copy of skb->data
 * r10 : copy of skb->data
 * r9d : hlen = skb->len - skb->data_len
 */
#define SKBDATA	%r8
#define SKBDATA	%r10
#define SKF_MAX_NEG_OFF    $(-0x200000) /* SKF_LL_OFF from filter.h */
#define MAX_BPF_STACK (512 /* from filter.h */ + \
	32 /* space for rbx,r13,r14,r15 */ + \
	8 /* space for skb_copy_bits */)

sk_load_word:
	.globl	sk_load_word
@@ -68,53 +71,31 @@ sk_load_byte_positive_offset:
	movzbl	(SKBDATA,%rsi),%eax
	ret

/**
 * sk_load_byte_msh - BPF_S_LDX_B_MSH helper
 *
 * Implements BPF_S_LDX_B_MSH : ldxb  4*([offset]&0xf)
 * Must preserve A accumulator (%eax)
 * Inputs : %esi is the offset value
 * Output : %ebx = 4 * (byte_at_offset & 0xf) -- presumably the X scratch
 *          register of the classic BPF JIT; confirm against the JIT core
 */
sk_load_byte_msh:
	.globl	sk_load_byte_msh
	test	%esi,%esi
	js	bpf_slow_path_byte_msh_neg	/* negative offsets take the range-checked slow path */

sk_load_byte_msh_positive_offset:
	.globl	sk_load_byte_msh_positive_offset
	cmp	%esi,%r9d      /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
	jle	bpf_slow_path_byte_msh	/* byte not in linear skb area -> slow path */
	movzbl	(SKBDATA,%rsi),%ebx	/* fast path: zero-extend byte from linear data */
	and	$15,%bl			/* keep the low nibble ... */
	shl	$2,%bl			/* ... scaled by 4, per the msh semantics above */
	ret

/* rsi contains offset and can be scratched */
/*
 * Slow path: the requested bytes are not all in the skb linear area, so
 * call skb_copy_bits(skb, offset, buf, LEN) to copy them into a stack
 * buffer, saving/restoring the live registers around the call.  The
 * "test %eax,%eax" sets flags so each caller can follow with "js bpf_error".
 *
 * NOTE(review): this text is a commit-diff rendering with the +/- markers
 * stripped: adjacent near-duplicate lines (the two "lea ...,%rdx" lines
 * and the two trailing pop endings) are the pre-/post-patch variants of
 * the same line.  Confirm against the applied tree before treating this
 * as assemblable source.
 */
#define bpf_slow_path_common(LEN)		\
	push	%rdi;    /* save skb */		\
	mov	%rbx, %rdi; /* arg1 == skb */	\
	push	%r9;				\
	push	SKBDATA;			\
/* rsi already has offset */			\
	mov	$LEN,%ecx;	/* len */	\
	lea	-12(%rbp),%rdx;	/* pre-patch: stack buf at -12(%rbp) */		\
	lea	- MAX_BPF_STACK + 32(%rbp),%rdx;	/* post-patch: buf just above the callee-saved area */		\
	call	skb_copy_bits;			\
	test    %eax,%eax;			\
	pop	SKBDATA;			\
	pop	%r9;				\
	pop	%rdi
	pop	%r9;


bpf_slow_path_word:
	bpf_slow_path_common(4)		/* copy 4 bytes into the stack buffer */
	js	bpf_error		/* skb_copy_bits() returned < 0 */
	mov	-12(%rbp),%eax		/* pre-patch load (diff rendering keeps both variants) */
	mov	- MAX_BPF_STACK + 32(%rbp),%eax	/* post-patch load of the copied word */
	bswap	%eax			/* byte-swap the word (network -> host order) */
	ret

bpf_slow_path_half:
	bpf_slow_path_common(2)		/* copy 2 bytes into the stack buffer */
	js	bpf_error		/* skb_copy_bits() returned < 0 */
	mov	-12(%rbp),%ax		/* pre-patch load (diff rendering keeps both variants) */
	mov	- MAX_BPF_STACK + 32(%rbp),%ax	/* post-patch load of the copied halfword */
	rol	$8,%ax			/* swap the two bytes (network -> host order) */
	movzwl	%ax,%eax		/* zero-extend 16 -> 32 bits */
	ret
@@ -122,21 +103,11 @@ bpf_slow_path_half:
bpf_slow_path_byte:
	bpf_slow_path_common(1)		/* copy a single byte into the stack buffer */
	js	bpf_error		/* skb_copy_bits() returned < 0 */
	movzbl	-12(%rbp),%eax		/* zero-extend the byte; no swap needed for one byte */
	ret

/*
 * msh slow path: copy the byte, compute 4*(byte & 0xf), and swap A back
 * into %eax so the accumulator survives the helper call.
 * NOTE(review): diff rendering -- the xchg/and/shl lines look like the
 * pre-patch msh path being removed by this commit, while the trailing
 * "movzbl - MAX_BPF_STACK + 32(%rbp),%eax" appears to belong to the
 * post-patch byte slow path; confirm against the applied tree.
 */
bpf_slow_path_byte_msh:
	xchg	%eax,%ebx /* dont lose A , X is about to be scratched */
	bpf_slow_path_common(1)
	js	bpf_error
	movzbl	-12(%rbp),%eax
	and	$15,%al
	shl	$2,%al
	xchg	%eax,%ebx
	movzbl	- MAX_BPF_STACK + 32(%rbp),%eax
	ret

#define sk_negative_common(SIZE)				\
	push	%rdi;	/* save skb */				\
	mov	%rbx, %rdi; /* arg1 == skb */			\
	push	%r9;						\
	push	SKBDATA;					\
/* rsi already has offset */					\
@@ -145,10 +116,8 @@ bpf_slow_path_byte_msh:
	test	%rax,%rax;					\
	pop	SKBDATA;					\
	pop	%r9;						\
	pop	%rdi;						\
	jz	bpf_error


bpf_slow_path_word_neg:
	cmp	SKF_MAX_NEG_OFF, %esi	/* test range */
	jl	bpf_error	/* offset lower -> error  */
@@ -179,22 +148,12 @@ sk_load_byte_negative_offset:
	movzbl	(%rax), %eax
	ret

/*
 * msh helper for negative (ancillary) offsets.
 * NOTE(review): these lines appear to be removed by this commit (the new
 * JIT handles msh internally) -- diff rendering, confirm against the tree.
 */
bpf_slow_path_byte_msh_neg:
	cmp	SKF_MAX_NEG_OFF, %esi	/* reject offsets below the allowed negative range */
	jl	bpf_error
sk_load_byte_msh_negative_offset:
	.globl	sk_load_byte_msh_negative_offset
	xchg	%eax,%ebx /* dont lose A , X is about to be scratched */
	sk_negative_common(1)		/* helper leaves a byte pointer in %rax (see movzbl below) */
	movzbl	(%rax),%eax
	and	$15,%al			/* low nibble ... */
	shl	$2,%al			/* ... times 4, the msh semantics */
	xchg	%eax,%ebx		/* restore A; result stays in %ebx */
	ret

/*
 * Common error exit for all slow paths: make the JITed filter return 0,
 * restore the callee-saved registers the JIT prologue spilled (offsets
 * match MAX_BPF_STACK's "space for rbx,r13,r14,r15"), and unwind.
 */
bpf_error:
# force a return 0 from jit handler
	xor	%eax,%eax		/* return value = 0; xor is the zeroing idiom */
	mov		-8(%rbp),%rbx	/* pre-patch restore (diff rendering keeps both variants) */
	mov	- MAX_BPF_STACK(%rbp),%rbx	/* post-patch: restore callee-saved regs from frame */
	mov	- MAX_BPF_STACK + 8(%rbp),%r13
	mov	- MAX_BPF_STACK + 16(%rbp),%r14
	mov	- MAX_BPF_STACK + 24(%rbp),%r15
	leaveq				/* mov %rbp,%rsp ; pop %rbp */
	ret