
Commit c7d5ec26 authored by David S. Miller


Daniel Borkmann says:

====================
pull-request: bpf 2019-05-16

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a use after free in __dev_map_entry_free(), from Eric.

2) Several sockmap related bug fixes: a splat in strparser if
   it was never initialized, remove duplicate ingress msg list
   purging which can race, fix msg->sg.size accounting upon
   skb to msg conversion, and last but not least fix a timeout
   bug in tcp_bpf_wait_data(), from John.

3) Fix LRU map to avoid messing with eviction heuristics upon
   syscall lookup: a map walk from the user space side would
   mark all map entries as referenced, causing subsequent
   updates to evict just-recently-created entries instead,
   from Daniel.

4) Don't bail out when libbpf feature probing fails. Also
   various smaller fixes to flow_dissector test, from Stanislav.

5) Fix missing brackets for BTF_INT_OFFSET() in UAPI, from Gary.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 858f5017 5fa2ca7c
Documentation/bpf/btf.rst  +1 −1

@@ -131,7 +131,7 @@ The following sections detail encoding of each kind.
 ``btf_type`` is followed by a ``u32`` with the following bits arrangement::

   #define BTF_INT_ENCODING(VAL)   (((VAL) & 0x0f000000) >> 24)
-  #define BTF_INT_OFFSET(VAL)     (((VAL  & 0x00ff0000)) >> 16)
+  #define BTF_INT_OFFSET(VAL)     (((VAL) & 0x00ff0000) >> 16)
   #define BTF_INT_BITS(VAL)       ((VAL)  & 0x000000ff)

 The ``BTF_INT_ENCODING`` has the following attributes::
include/linux/bpf.h  +1 −0

@@ -36,6 +36,7 @@ struct bpf_map_ops {
 	void (*map_free)(struct bpf_map *map);
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 	void (*map_release_uref)(struct bpf_map *map);
+	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);

 	/* funcs callable from userspace and from eBPF programs */
 	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
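
The new map_lookup_elem_sys_only hook lets a map type supply a lookup
variant that is used only for lookups arriving via the bpf(2) syscall.
Below is a minimal user-space model of the dispatch this enables; the
struct and function names mirror the kernel's, but the code is purely
illustrative, not the kernel implementation (the real dispatch lives in
the syscall lookup path, which is part of this series but not shown on
this page):

  #include <stdio.h>

  /* Illustrative user-space model; names mirror the kernel's, but
   * nothing here is kernel code. */
  struct bpf_map;

  struct bpf_map_ops {
  	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
  	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
  };

  struct bpf_map {
  	const struct bpf_map_ops *ops;
  };

  static void *lookup_mark(struct bpf_map *map, void *key)
  {
  	(void)map; (void)key;
  	printf("default path: sets the LRU ref bit\n");
  	return NULL;
  }

  static void *lookup_no_mark(struct bpf_map *map, void *key)
  {
  	(void)map; (void)key;
  	printf("syscall-only path: leaves the LRU ref bit alone\n");
  	return NULL;
  }

  /* Syscall-side lookup: prefer the sys-only variant when present. */
  static void *sys_lookup_elem(struct bpf_map *map, void *key)
  {
  	if (map->ops->map_lookup_elem_sys_only)
  		return map->ops->map_lookup_elem_sys_only(map, key);
  	return map->ops->map_lookup_elem(map, key);
  }

  int main(void)
  {
  	const struct bpf_map_ops lru_ops = {
  		.map_lookup_elem = lookup_mark,
  		.map_lookup_elem_sys_only = lookup_no_mark,
  	};
  	struct bpf_map m = { .ops = &lru_ops };

  	sys_lookup_elem(&m, NULL);	/* takes the non-marking path */
  	return 0;
  }

Map types that do not set the hook are unaffected: the dispatch falls
through to the regular map_lookup_elem as before.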
include/uapi/linux/btf.h  +1 −1

@@ -83,7 +83,7 @@ struct btf_type {
  * is the 32 bits arrangement:
  */
 #define BTF_INT_ENCODING(VAL)	(((VAL) & 0x0f000000) >> 24)
-#define BTF_INT_OFFSET(VAL)	(((VAL  & 0x00ff0000)) >> 16)
+#define BTF_INT_OFFSET(VAL)	(((VAL) & 0x00ff0000) >> 16)
 #define BTF_INT_BITS(VAL)	((VAL)  & 0x000000ff)

 /* Attributes stored in the BTF_INT_ENCODING */
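
Gary's fix (here and in the documentation hunk above) addresses the
classic unparenthesized-macro-argument bug: in the old form the mask
binds to only part of an expression argument, because '&' has higher
precedence than, e.g., '|'. A small stand-alone sketch with made-up
input values shows where the two variants diverge:

  #include <stdio.h>
  #include <stdint.h>

  /* Old, buggy form: VAL itself is not parenthesized. */
  #define BTF_INT_OFFSET_OLD(VAL)	(((VAL  & 0x00ff0000)) >> 16)
  /* Fixed form from this commit. */
  #define BTF_INT_OFFSET_NEW(VAL)	(((VAL) & 0x00ff0000) >> 16)

  int main(void)
  {
  	uint32_t enc_part = 0x0f000000;	/* encoding bits, outside the offset mask */
  	uint32_t off_part = 0x00020000;	/* offset = 2 */

  	/* Old: expands to ((enc_part | (off_part & 0x00ff0000)) >> 16),
  	 * so the encoding bits leak into the result: prints 0xf02. */
  	printf("old: 0x%x\n", (unsigned)BTF_INT_OFFSET_OLD(enc_part | off_part));

  	/* New: masks the whole argument first, as intended: prints 0x2. */
  	printf("new: 0x%x\n", (unsigned)BTF_INT_OFFSET_NEW(enc_part | off_part));
  	return 0;
  }

With a plain variable as argument both forms agree, which is why the
bug could go unnoticed; only expression arguments misparse.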
kernel/bpf/devmap.c  +3 −0

@@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
 	bpf_clear_redirect_map(map);
 	synchronize_rcu();

+	/* Make sure prior __dev_map_entry_free() have completed. */
+	rcu_barrier();
+
 	/* To ensure all pending flush operations have completed wait for flush
 	 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
 	 * Because the above synchronize_rcu() ensures the map is disconnected
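
The one-liner matters because synchronize_rcu() only waits out readers'
grace periods; callbacks queued with call_rcu(), such as
__dev_map_entry_free() here, may still be pending or running when it
returns. rcu_barrier() waits for all previously queued callbacks to
finish. A generic, hedged sketch of the pattern (kernel-style C for
illustration, not the devmap code itself):

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  /* Generic RCU teardown pattern, sketched for illustration only. */
  struct item {
  	struct rcu_head rcu;
  	/* payload ... */
  };

  static void item_free_rcu(struct rcu_head *head)
  {
  	kfree(container_of(head, struct item, rcu));
  }

  static void put_item(struct item *it)
  {
  	/* Frees asynchronously, after the current grace period. */
  	call_rcu(&it->rcu, item_free_rcu);
  }

  static void teardown(void)
  {
  	/* Waits for readers only; item_free_rcu() callbacks queued by
  	 * put_item() may still run after this returns. */
  	synchronize_rcu();

  	/* Waits for all previously queued call_rcu() callbacks, so any
  	 * state they dereference can now be torn down safely, which is
  	 * what dev_map_free() needs here. */
  	rcu_barrier();
  }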
kernel/bpf/hashtab.c  +18 −5

@@ -527,11 +527,13 @@ static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }

-static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
+							void *key, const bool mark)
 {
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);

 	if (l) {
-		bpf_lru_node_set_ref(&l->lru_node);
+		if (mark)
+			bpf_lru_node_set_ref(&l->lru_node);
 		return l->key + round_up(map->key_size, 8);
 	}
@@ -539,6 +541,16 @@ static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
 	return NULL;
 }

+static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, true);
+}
+
+static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
+{
+	return __htab_lru_map_lookup_elem(map, key, false);
+}
+
 static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
@@ -1250,6 +1262,7 @@ const struct bpf_map_ops htab_lru_map_ops = {
 	.map_free = htab_map_free,
 	.map_get_next_key = htab_map_get_next_key,
 	.map_lookup_elem = htab_lru_map_lookup_elem,
+	.map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
 	.map_update_elem = htab_lru_map_update_elem,
 	.map_delete_elem = htab_lru_map_delete_elem,
 	.map_gen_lookup = htab_lru_map_gen_lookup,
@@ -1281,7 +1294,6 @@ static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)

 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 {
-	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 	struct htab_elem *l;
 	void __percpu *pptr;
 	int ret = -ENOENT;
@@ -1297,8 +1309,9 @@ int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
 	l = __htab_map_lookup_elem(map, key);
 	if (!l)
 		goto out;
-	if (htab_is_lru(htab))
-		bpf_lru_node_set_ref(&l->lru_node);
+	/* We do not mark LRU map element here in order to not mess up
+	 * eviction heuristics when user space does a map walk.
+	 */
 	pptr = htab_elem_get_ptr(l, map->key_size);
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off,
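
The user-visible effect: syscall-side lookups on an LRU hash map now go
through htab_lru_map_lookup_elem_sys() and no longer set the reference
bit, while lookups from BPF programs still mark entries as before. A
sketch of an affected map walk using libbpf's syscall wrappers (the map
fd and the u32/u64 key/value layout are assumptions for illustration):

  #include <bpf/bpf.h>
  #include <stdio.h>
  #include <stdint.h>

  /* Dump all entries of an LRU hash map from user space. After this
   * fix, these lookups do not promote entries in the LRU, so a
   * periodic dump no longer shields stale entries from eviction. */
  static void dump_lru_map(int map_fd)
  {
  	uint32_t key, next_key;
  	uint64_t value;
  	uint32_t *prev = NULL;	/* NULL key: start from the first entry */

  	while (bpf_map_get_next_key(map_fd, prev, &next_key) == 0) {
  		if (bpf_map_lookup_elem(map_fd, &next_key, &value) == 0)
  			printf("key %u -> %llu\n", next_key,
  			       (unsigned long long)value);
  		key = next_key;
  		prev = &key;
  	}
  }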