Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1268e253 authored by David S. Miller
Browse files

net: filter: Fix redefinition warnings on x86-64.



Do not collide with the x86-64 PTRACE user API namespace.

net/core/filter.c:57:0: warning: "R8" redefined [enabled by default]
arch/x86/include/uapi/asm/ptrace-abi.h:38:0: note: this is the location of the previous definition

Fix by adding a BPF_ prefix to the register macros.

Reported-by: Randy Dunlap <rdunlap@infradead.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 62555582
Loading
Loading
Loading
Loading
+37 −34
Original line number Original line Diff line number Diff line
@@ -46,17 +46,17 @@
#include <linux/if_vlan.h>
#include <linux/if_vlan.h>


/* Registers */
/* Registers */
#define R0	regs[BPF_REG_0]
#define BPF_R0	regs[BPF_REG_0]
#define R1	regs[BPF_REG_1]
#define BPF_R1	regs[BPF_REG_1]
#define R2	regs[BPF_REG_2]
#define BPF_R2	regs[BPF_REG_2]
#define R3	regs[BPF_REG_3]
#define BPF_R3	regs[BPF_REG_3]
#define R4	regs[BPF_REG_4]
#define BPF_R4	regs[BPF_REG_4]
#define R5	regs[BPF_REG_5]
#define BPF_R5	regs[BPF_REG_5]
#define R6	regs[BPF_REG_6]
#define BPF_R6	regs[BPF_REG_6]
#define R7	regs[BPF_REG_7]
#define BPF_R7	regs[BPF_REG_7]
#define R8	regs[BPF_REG_8]
#define BPF_R8	regs[BPF_REG_8]
#define R9	regs[BPF_REG_9]
#define BPF_R9	regs[BPF_REG_9]
#define R10	regs[BPF_REG_10]
#define BPF_R10	regs[BPF_REG_10]


/* Named registers */
/* Named registers */
#define A	regs[insn->a_reg]
#define A	regs[insn->a_reg]
@@ -383,10 +383,12 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)


	/* CALL */
	/* CALL */
	JMP_CALL_0:
	JMP_CALL_0:
		/* Function call scratches R1-R5 registers, preserves R6-R9,
		/* Function call scratches BPF_R1-BPF_R5 registers,
		 * and stores return value into R0.
		 * preserves BPF_R6-BPF_R9, and stores return value
		 * into BPF_R0.
		 */
		 */
		R0 = (__bpf_call_base + insn->imm)(R1, R2, R3, R4, R5);
		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
						       BPF_R4, BPF_R5);
		CONT;
		CONT;


	/* JMP */
	/* JMP */
@@ -478,7 +480,7 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
		}
		}
		CONT;
		CONT;
	JMP_EXIT_0:
	JMP_EXIT_0:
		return R0;
		return BPF_R0;


	/* STX and ST and LDX*/
	/* STX and ST and LDX*/
#define LDST(SIZEOP, SIZE)					\
#define LDST(SIZEOP, SIZE)					\
@@ -505,18 +507,19 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
		atomic64_add((u64) X, (atomic64_t *)(unsigned long)
			     (A + insn->off));
			     (A + insn->off));
		CONT;
		CONT;
	LD_ABS_W: /* R0 = ntohl(*(u32 *) (skb->data + K)) */
	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */
		off = K;
		off = K;
load_word:
load_word:
		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
		 * appearing in the programs where ctx == skb. All programs
		 * only appearing in the programs where ctx ==
		 * keep 'ctx' in regs[BPF_REG_CTX] == R6, sk_convert_filter()
		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
		 * saves it in R6, internal BPF verifier will check that
		 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
		 * R6 == ctx.
		 * internal BPF verifier will check that BPF_R6 ==
		 * ctx.
		 *
		 *
		 * BPF_ABS and BPF_IND are wrappers of function calls, so
		 * BPF_ABS and BPF_IND are wrappers of function calls,
		 * they scratch R1-R5 registers, preserve R6-R9, and store
		 * so they scratch BPF_R1-BPF_R5 registers, preserve
		 * return value into R0.
		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
		 *
		 *
		 * Implicit input:
		 * Implicit input:
		 *   ctx
		 *   ctx
@@ -526,39 +529,39 @@ unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
		 *   K == 32-bit immediate
		 *   K == 32-bit immediate
		 *
		 *
		 * Output:
		 * Output:
		 *   R0 - 8/16/32-bit skb data converted to cpu endianness
		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
		 */
		 */
		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
		ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp);
		if (likely(ptr != NULL)) {
		if (likely(ptr != NULL)) {
			R0 = get_unaligned_be32(ptr);
			BPF_R0 = get_unaligned_be32(ptr);
			CONT;
			CONT;
		}
		}
		return 0;
		return 0;
	LD_ABS_H: /* R0 = ntohs(*(u16 *) (skb->data + K)) */
	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */
		off = K;
		off = K;
load_half:
load_half:
		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
		ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp);
		if (likely(ptr != NULL)) {
		if (likely(ptr != NULL)) {
			R0 = get_unaligned_be16(ptr);
			BPF_R0 = get_unaligned_be16(ptr);
			CONT;
			CONT;
		}
		}
		return 0;
		return 0;
	LD_ABS_B: /* R0 = *(u8 *) (ctx + K) */
	LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */
		off = K;
		off = K;
load_byte:
load_byte:
		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
		ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp);
		if (likely(ptr != NULL)) {
		if (likely(ptr != NULL)) {
			R0 = *(u8 *)ptr;
			BPF_R0 = *(u8 *)ptr;
			CONT;
			CONT;
		}
		}
		return 0;
		return 0;
	LD_IND_W: /* R0 = ntohl(*(u32 *) (skb->data + X + K)) */
	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */
		off = K + X;
		off = K + X;
		goto load_word;
		goto load_word;
	LD_IND_H: /* R0 = ntohs(*(u16 *) (skb->data + X + K)) */
	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */
		off = K + X;
		off = K + X;
		goto load_half;
		goto load_half;
	LD_IND_B: /* R0 = *(u8 *) (skb->data + X + K) */
	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */
		off = K + X;
		off = K + X;
		goto load_byte;
		goto load_byte;


@@ -1001,7 +1004,7 @@ int sk_convert_filter(struct sock_filter *prog, int len,
			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
			*insn = BPF_ALU64_REG(BPF_MOV, BPF_REG_TMP, BPF_REG_A);
			insn++;
			insn++;


			/* A = R0 = *(u8 *) (skb->data + K) */
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn = BPF_LD_ABS(BPF_B, fp->k);
			*insn = BPF_LD_ABS(BPF_B, fp->k);
			insn++;
			insn++;