
Commit bf006d18 authored by David S. Miller

Daniel Borkmann says:

====================
pull-request: bpf 2018-02-20

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix a memory leak in the LPM trie's map_free() callback, where the
   trie structure itself had never been freed since the initial
   implementation. A synchronize_rcu() is also needed in order to wait
   for outstanding programs accessing the trie to complete, from Yonghong.

2) Fix sock_map_alloc()'s error path so that it correctly propagates
   the -EINVAL error for overly large allocation requests. The bug was
   introduced just recently when fixing close hooks via the ULP layer,
   fix from Eric.

3) Do not use GFP_ATOMIC in __cpu_map_entry_alloc(). The reason is that
   it no longer works after the recent conversion of
   __ptr_ring_init_queue_alloc() to kvmalloc_array(), where that GFP
   flag is invalid in case of a fallback to vmalloc(), from Jason.

4) Fix two recent syzkaller warnings: i) fix bpf_prog_array_copy_to_user()
   so that a prog query with a large number of ids no longer triggers a
   warning from the allocator side, and ii) fix a missing mlock precharge
   on arraymaps, from Daniel.

5) Two fixes for bpftool to avoid breaking JSON output when it is used
   in batch mode, from Quentin.

6) Move a pr_debug() in libbpf so that bpf_program__reloc_text() no
   longer makes use of an otherwise uninitialized variable, from Jeremy.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6c4df17c b1a2ce82

kernel/bpf/arraymap.c  (+16 −12)

@@ -73,11 +73,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +109,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
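
Note on the arraymap change: the hunks above make array_map_alloc()
compute the complete map cost, including the per-cpu area of
BPF_MAP_TYPE_PERCPU_ARRAY, and charge it via bpf_map_precharge_memlock()
before any allocation takes place, with each addition guarded against
u32 overflow. Below is a minimal userspace sketch of that cost
computation; PAGE_SZ, PG_SHIFT and the CPU count are stand-ins for the
kernel's values, not the actual implementation:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SZ		4096ULL		/* stand-in for PAGE_SIZE */
	#define PG_SHIFT	12		/* stand-in for PAGE_SHIFT */

	static long long map_cost_pages(uint32_t max_entries, uint32_t elem_size,
					int percpu, unsigned int ncpus)
	{
		uint64_t cost = (uint64_t)max_entries * elem_size;

		/* make sure there is no u32 overflow later in round_up() */
		if (cost >= UINT32_MAX - PAGE_SZ)
			return -1;
		if (percpu) {
			cost += (uint64_t)max_entries * elem_size * ncpus;
			if (cost >= UINT32_MAX - PAGE_SZ)
				return -1;
		}
		/* round_up(cost, PAGE_SIZE) >> PAGE_SHIFT in kernel terms */
		return (long long)((cost + PAGE_SZ - 1) >> PG_SHIFT);
	}

	int main(void)
	{
		/* 65536 entries of 64 bytes, percpu, on an 8-CPU machine */
		printf("%lld pages\n", map_cost_pages(65536, 64, 1, 8));
		return 0;
	}

The point of the reordering is that this page count is known, checked
and precharged before bpf_map_area_alloc() runs, so the per-cpu storage
can no longer escape memlock accounting.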

kernel/bpf/core.c  (+1 −1)

@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
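
Note on the core.c change: cnt is derived from a user-controlled prog
query, so an oversized request could make kcalloc() fail and emit a
page-allocator warning, which is what syzkaller triggered. __GFP_NOWARN
turns that into a quiet -ENOMEM. A kernel-style sketch of the idiom,
not a complete file; copy_ids() is a hypothetical example function:

	#include <linux/slab.h>

	/* Hypothetical example: the element count comes from userspace,
	 * so allocation failure is expected behaviour rather than a bug
	 * worth a backtrace.
	 */
	static int copy_ids(u32 cnt)
	{
		u32 *ids;

		ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
		if (!ids)
			return -ENOMEM;	/* fail quietly */

		/* ... fill ids[] and copy them out to userspace ... */

		kfree(ids);
		return 0;
	}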

kernel/bpf/cpumap.c  (+1 −1)

@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
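Note on the cpumap change: the gfp value above is handed down to the
ptr_ring setup, and __ptr_ring_init_queue_alloc() was recently
converted to kvmalloc_array(), which may fall back to vmalloc().
vmalloc() can sleep and cannot honor GFP_ATOMIC; since
__cpu_map_entry_alloc() runs in process context, GFP_KERNEL is the
right flag anyway. A kernel-style sketch of the rule, not a complete
file; alloc_slots() is a hypothetical example function:

	#include <linux/mm.h>

	/* kvmalloc_array() tries kmalloc first and may fall back to
	 * vmalloc(), which can sleep, so only GFP_KERNEL-compatible
	 * flags are valid for it.
	 */
	static void *alloc_slots(size_t nr, size_t slot_size)
	{
		/* OK: process context, sleeping allocation allowed */
		return kvmalloc_array(nr, slot_size, GFP_KERNEL | __GFP_NOWARN);

		/* Broken variant for contrast -- the vmalloc fallback
		 * cannot satisfy an atomic allocation:
		 *
		 *	kvmalloc_array(nr, slot_size, GFP_ATOMIC | __GFP_NOWARN);
		 */
	}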

kernel/bpf/lpm_trie.c  (+7 −4)

@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -569,7 +572,7 @@ static void trie_free(struct bpf_map *map)
 			node = rcu_dereference_protected(*slot,
 					lockdep_is_held(&trie->lock));
 			if (!node)
-				goto unlock;
+				goto out;
 
 			if (rcu_access_pointer(node->child[0])) {
 				slot = &node->child[0];
@@ -587,8 +590,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
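
Note on the lpm_trie change: two things happen in this diff. The added
kfree(trie) plugs the leak, and synchronize_rcu() replaces the spinlock
as the teardown barrier: trie_free() only runs once the map's last
reference is gone, so no new lookups can start, and a single RCU grace
period lets programs still walking the trie finish before its nodes are
destroyed. A condensed kernel-style sketch of the pattern, not a
complete file; struct node is a hypothetical stand-in for
lpm_trie_node:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct node {
		struct node __rcu *child[2];
	};

	/* Called when no new readers can reach the tree anymore. */
	static void free_tree(struct node __rcu **root)
	{
		struct node __rcu **slot;
		struct node *n;

		synchronize_rcu();	/* in-flight RCU readers drain here */

		/* Walk down to a childless node, free it, clear its slot
		 * in the parent and start over -- the same shape as
		 * trie_free() above.
		 */
		for (;;) {
			slot = root;
			for (;;) {
				n = rcu_dereference_protected(*slot, 1);
				if (!n)
					return;
				if (rcu_access_pointer(n->child[0])) {
					slot = &n->child[0];
					continue;
				}
				if (rcu_access_pointer(n->child[1])) {
					slot = &n->child[1];
					continue;
				}
				kfree(n);
				RCU_INIT_POINTER(*slot, NULL);
				break;
			}
		}
	}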

kernel/bpf/sockmap.c  (+2 −1)

@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
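Note on the sockmap change: the subtlety is where err picks up its
value. After the recent ULP-related rework, code that executes between
the declaration and the overflow check can leave err holding something
other than -EINVAL (typically 0 from a successful call), so jumping to
free_stab from there would propagate the wrong result. Assigning the
errno immediately before the check it guards closes that hole. A
kernel-style sketch, not a complete file; setup_step() and alloc_map()
are hypothetical helpers:

	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/mm.h>

	struct bpf_map;
	struct sock;

	int setup_step(void);			/* hypothetical helper */
	struct bpf_map *alloc_map(u64 cost);	/* hypothetical helper */

	static struct bpf_map *map_alloc_example(u32 max_entries)
	{
		struct bpf_map *map;
		u64 cost;
		int err;

		err = setup_step();	/* leaves err == 0 on success */
		if (err)
			goto fail;

		cost = (u64)max_entries * sizeof(struct sock *);
		err = -EINVAL;		/* set right before the check it guards */
		if (cost >= U32_MAX - PAGE_SIZE)
			goto fail;

		map = alloc_map(cost);	/* hypothetical allocation step */
		if (!map) {
			err = -ENOMEM;
			goto fail;
		}
		return map;
	fail:
		return ERR_PTR(err);
	}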