Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a7bc5774 authored by David S. Miller
Browse files

Merge branch 'xdp-bpf-fixes'



John Fastabend says:

====================
net: Fixes for XDP/BPF

The following are fixes, UAPI updates, and a small improvement:

i. XDP needs to be called inside RCU with preempt disabled.

ii. Not strictly a bug fix but we have an attach command in the
sockmap UAPI already to avoid having a single kernel released with
only the attach and not the detach I'm pushing this into net branch.
It's early in the RC cycle, so I think this is OK (not ideal, but better
than supporting a UAPI with a missing detach forever).

iii. The final patch replaces cpu_relax with cond_resched in devmap.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 109980b8 374fb014
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -385,14 +385,14 @@ static inline void __dev_map_flush(struct bpf_map *map)

#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
#else
static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline int sock_map_attach_prog(struct bpf_map *map,
static inline int sock_map_prog(struct bpf_map *map,
				struct bpf_prog *prog,
				u32 type)
{
+1 −1
Original line number Diff line number Diff line
@@ -159,7 +159,7 @@ static void dev_map_free(struct bpf_map *map)
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cpu_relax();
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
+1 −1
Original line number Diff line number Diff line
@@ -792,7 +792,7 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
	return err;
}

int sock_map_attach_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
{
	struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
	struct bpf_prog *orig;
+17 −10
Original line number Diff line number Diff line
@@ -1096,10 +1096,10 @@ static int bpf_obj_get(const union bpf_attr *attr)

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int sockmap_get_from_fd(const union bpf_attr *attr)
static int sockmap_get_from_fd(const union bpf_attr *attr, bool attach)
{
	struct bpf_prog *prog = NULL;
	int ufd = attr->target_fd;
	struct bpf_prog *prog;
	struct bpf_map *map;
	struct fd f;
	int err;
@@ -1109,15 +1109,19 @@ static int sockmap_get_from_fd(const union bpf_attr *attr)
	if (IS_ERR(map))
		return PTR_ERR(map);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, BPF_PROG_TYPE_SK_SKB);
	if (attach) {
		prog = bpf_prog_get_type(attr->attach_bpf_fd,
					 BPF_PROG_TYPE_SK_SKB);
		if (IS_ERR(prog)) {
			fdput(f);
			return PTR_ERR(prog);
		}
	}

	err = sock_map_attach_prog(map, prog, attr->attach_type);
	err = sock_map_prog(map, prog, attr->attach_type);
	if (err) {
		fdput(f);
		if (prog)
			bpf_prog_put(prog);
		return err;
	}
@@ -1155,7 +1159,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
		break;
	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		return sockmap_get_from_fd(attr);
		return sockmap_get_from_fd(attr, true);
	default:
		return -EINVAL;
	}
@@ -1204,7 +1208,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	case BPF_SK_SKB_STREAM_PARSER:
	case BPF_SK_SKB_STREAM_VERDICT:
		ret = sockmap_get_from_fd(attr, false);
		break;
	default:
		return -EINVAL;
	}
+16 −9
Original line number Diff line number Diff line
@@ -3981,8 +3981,13 @@ static int netif_rx_internal(struct sk_buff *skb)
	trace_netif_rx(skb);

	if (static_key_false(&generic_xdp_needed)) {
		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
					 skb);
		int ret;

		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		/* Consider XDP consuming the packet a success from
		 * the netdev point of view we do not want to count
@@ -4500,18 +4505,20 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

	if (static_key_false(&generic_xdp_needed)) {
		int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog),
					 skb);
		int ret;

		if (ret != XDP_PASS) {
		preempt_disable();
		rcu_read_lock();
		ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
		rcu_read_unlock();
		preempt_enable();

		if (ret != XDP_PASS)
			return NET_RX_DROP;
	}
	}

	rcu_read_lock();
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
Loading