Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e7af85db authored by David S. Miller
Browse files


Pablo Neira Ayuso says:

====================
nf pull request for net

This series contains netfilter fixes for net, they are:

1) Fix lockdep splat in nft_hash when releasing sets from the
   rcu_callback context. We don't hold the mutex there anymore.

2) Remove unnecessary spinlock_bh in the destroy path of the nf_tables
   rbtree set type from rcu_callback context.

3) Fix another lockdep splat in rhashtable. None of the callers hold
   a mutex when calling rhashtable_destroy.

4) Fix duplicated error reporting from nfnetlink when aborting and
   replaying a batch.

5) Fix a Kconfig issue reported by kbuild robot.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 445f7f4d 679ab4dd
Loading
Loading
Loading
Loading
+4 −4
Original line number Diff line number Diff line
@@ -588,13 +588,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array.
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
/* NOTE(review): this span is a rendered two-way diff — pre- and post-change
 * lines are interleaved. The rht_dereference() initializer and
 * bucket_table_free(tbl) are the OLD version; bucket_table_free(ht->tbl) is
 * the NEW one. Only one free call exists in the real source. The change
 * drops rht_dereference() because, per the kernel-doc above, the caller
 * must already have unpublished the table and waited for a quiescent cycle,
 * so no RCU annotation (and no mutex, cf. the lockdep splat this fixes)
 * is needed here.
 */
void rhashtable_destroy(const struct rhashtable *ht)
{
	/* OLD: RCU-annotated read of ht->tbl (required holding ht's mutex) */
	const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);

	/* OLD: free via the dereferenced local */
	bucket_table_free(tbl);
	/* NEW: free the bucket array via a plain read of ht->tbl */
	bucket_table_free(ht->tbl);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

+1 −0
Original line number Diff line number Diff line
@@ -847,6 +847,7 @@ config NETFILTER_XT_TARGET_TPROXY
	tristate '"TPROXY" target transparent proxying support'
	depends on NETFILTER_XTABLES
	depends on NETFILTER_ADVANCED
	depends on (IPV6 || IPV6=n)
	depends on IP_NF_MANGLE
	select NF_DEFRAG_IPV4
	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+63 −1
Original line number Diff line number Diff line
@@ -222,6 +222,51 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
	}
}

/* One deferred netlink ack/error. Errors are queued while a batch is
 * processed and delivered only once the whole batch has completed, so the
 * same error is not reported repeatedly when the batch is replayed.
 */
struct nfnl_err {
	struct list_head	head;	/* link in the per-batch error list */
	struct nlmsghdr		*nlh;	/* message that triggered this ack/error */
	int			err;	/* errno to report; 0 for a plain ACK */
};

/* Queue one deferred ack/error for @nlh on @list.
 *
 * Returns 0 on success or -ENOMEM if the bookkeeping entry could not be
 * allocated; on failure nothing is queued and the caller is expected to
 * flush the list and report the OOM itself.
 */
static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
{
	struct nfnl_err *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;

	e->nlh = nlh;
	e->err = err;
	list_add_tail(&e->head, list);
	return 0;
}

/* Unlink one queued error from its list and release it. */
static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}

static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}

/* Flush @err_list to userspace: send one netlink ack per queued entry,
 * in queueing order, against the original batch skb, then free the queue.
 */
static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *e, *tmp;

	list_for_each_entry_safe(e, tmp, err_list, head) {
		netlink_ack(skb, e->nlh, e->err);
		list_del(&e->head);
		kfree(e);
	}
}

static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u_int16_t subsys_id)
{
@@ -230,6 +275,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	bool success = true, done = false;
	static LIST_HEAD(err_list);
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
@@ -287,6 +333,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			success = false;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
@@ -333,6 +380,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
			 * original skb.
			 */
			if (err == -EAGAIN) {
				nfnl_err_reset(&err_list);
				ss->abort(skb);
				nfnl_unlock(subsys_id);
				kfree_skb(nskb);
@@ -341,11 +389,24 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
				success = false;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			netlink_ack(skb, nlh, err);
			if (err)
				success = false;
		}
@@ -361,6 +422,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
	else
		ss->abort(skb);

	nfnl_err_deliver(&err_list, oskb);
	nfnl_unlock(subsys_id);
	kfree_skb(nskb);
}
+7 −5
Original line number Diff line number Diff line
@@ -180,15 +180,17 @@ static int nft_hash_init(const struct nft_set *set,
/* NOTE(review): this span is a rendered two-way diff — pre- and post-change
 * lines are interleaved (two declarations of 'tbl', two loop bodies). Only
 * one of each exists in the real source. The change open-codes the walk and
 * reads priv->tbl without rht_dereference(), because at set-destroy time we
 * run from rcu_callback context without the nfnl mutex (the lockdep splat
 * this series fixes).
 */
static void nft_hash_destroy(const struct nft_set *set)
{
	const struct rhashtable *priv = nft_set_priv(set);
	/* OLD: declare then rht_dereference() below */
	const struct bucket_table *tbl;
	/* NEW: plain read of the bucket table, no RCU annotation */
	const struct bucket_table *tbl = priv->tbl;
	struct nft_hash_elem *he, *next;
	unsigned int i;

	/* OLD: RCU-annotated walk via rht_for_each_entry_safe() */
	tbl = rht_dereference(priv->tbl, priv);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
	/* NEW: open-coded bucket-chain walk; 'next' is cached before the
	 * current element is destroyed, so freeing 'he' is safe. */
	for (i = 0; i < tbl->size; i++) {
		for (he = rht_entry(tbl->buckets[i], struct nft_hash_elem, node);
		     he != NULL; he = next) {
			next = rht_entry(he->node.next, struct nft_hash_elem, node);
			nft_hash_elem_destroy(set, he);

		}
	}
	rhashtable_destroy(priv);
}

+0 −2
Original line number Diff line number Diff line
@@ -234,13 +234,11 @@ static void nft_rbtree_destroy(const struct nft_set *set)
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	spin_lock_bh(&nft_rbtree_lock);
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_rbtree_elem_destroy(set, rbe);
	}
	spin_unlock_bh(&nft_rbtree_lock);
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,