Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8034e1ef authored by David S. Miller
Browse files
parents bffae697 b16c2919
Loading
Loading
Loading
Loading
+3 −5
Original line number Diff line number Diff line
@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
            const struct nf_conntrack_l3proto *l3proto,
            const struct nf_conntrack_l4proto *proto);

#ifdef CONFIG_LOCKDEP
# define CONNTRACK_LOCKS 8
#else
#define CONNTRACK_LOCKS 1024
#endif

extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
void nf_conntrack_lock(spinlock_t *lock);

extern spinlock_t nf_conntrack_expect_lock;

+0 −4
Original line number Diff line number Diff line
@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
	};
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

	if (e.cidr == 0)
		return -EINVAL;
	if (adt == IPSET_TEST)
		e.cidr = HOST_MASK;

@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
	};
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);

	if (e.cidr == 0)
		return -EINVAL;
	if (adt == IPSET_TEST)
		e.cidr = HOST_MASK;

+28 −10
Original line number Diff line number Diff line
@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
static __read_mostly bool nf_conntrack_locks_all;

/*
 * Acquire one per-bucket conntrack lock while cooperating with a
 * "lock everything" writer (nf_conntrack_all_lock): after taking the
 * bucket lock, if nf_conntrack_locks_all is set, drop the bucket lock
 * and briefly acquire/release nf_conntrack_locks_all_lock — which the
 * all-locker holds for the duration — to wait it out, then retry.
 * The __acquires(lock) annotation is for sparse lock-context checking.
 */
void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	spin_lock(lock);
	while (unlikely(nf_conntrack_locks_all)) {
		spin_unlock(lock);
		/* Spin here until the global locker releases
		 * nf_conntrack_locks_all_lock, then retry the bucket lock
		 * and re-check the flag. */
		spin_lock(&nf_conntrack_locks_all_lock);
		spin_unlock(&nf_conntrack_locks_all_lock);
		spin_lock(lock);
	}
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= CONNTRACK_LOCKS;
@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	if (h1 <= h2) {
		spin_lock(&nf_conntrack_locks[h1]);
		nf_conntrack_lock(&nf_conntrack_locks[h1]);
		if (h1 != h2)
			spin_lock_nested(&nf_conntrack_locks[h2],
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&nf_conntrack_locks[h2]);
		nf_conntrack_lock(&nf_conntrack_locks[h2]);
		spin_lock_nested(&nf_conntrack_locks[h1],
				 SINGLE_DEPTH_NESTING);
	}
@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void)
{
	int i;

	/* NOTE(review): this span is a flattened diff hunk — the +/-
	 * markers were stripped, so the removed implementation (the
	 * spin_lock_nested loop just below) and the added implementation
	 * (the locks_all flag protocol after it) both appear as if they
	 * were sequential code. They are alternatives, not one body. */
	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_lock_nested(&nf_conntrack_locks[i], i);
	/* New scheme: publish the "all locked" flag while holding
	 * nf_conntrack_locks_all_lock... */
	spin_lock(&nf_conntrack_locks_all_lock);
	nf_conntrack_locks_all = true;

	/* ...then lock/unlock each bucket lock once so that any holder
	 * that raced past the flag check in nf_conntrack_lock() has
	 * drained before we proceed. */
	for (i = 0; i < CONNTRACK_LOCKS; i++) {
		spin_lock(&nf_conntrack_locks[i]);
		spin_unlock(&nf_conntrack_locks[i]);
	}
}

static void nf_conntrack_all_unlock(void)
{
	int i;

	/* NOTE(review): flattened diff hunk — the loop below is the
	 * removed old implementation (it unlocked every bucket lock
	 * taken by the old nf_conntrack_all_lock); the two lines after
	 * it are the added replacement, which clears the locks_all flag
	 * and releases the global lock that per-bucket lockers spin on. */
	for (i = 0; i < CONNTRACK_LOCKS; i++)
		spin_unlock(&nf_conntrack_locks[i]);
	nf_conntrack_locks_all = false;
	spin_unlock(&nf_conntrack_locks_all_lock);
}

unsigned int nf_conntrack_htable_size __read_mostly;
@@ -757,7 +775,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
	hash = hash_bucket(_hash, net);
	for (; i < net->ct.htable_size; i++) {
		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
		spin_lock(lockp);
		nf_conntrack_lock(lockp);
		if (read_seqcount_retry(&net->ct.generation, sequence)) {
			spin_unlock(lockp);
			goto restart;
@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
	for (; *bucket < net->ct.htable_size; (*bucket)++) {
		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
		local_bh_disable();
		spin_lock(lockp);
		nf_conntrack_lock(lockp);
		if (*bucket < net->ct.htable_size) {
			hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
+1 −1
Original line number Diff line number Diff line
@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
	}
	local_bh_disable();
	for (i = 0; i < net->ct.htable_size; i++) {
		spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
		nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
		if (i < net->ct.htable_size) {
			hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
				unhelp(h, me);
+1 −1
Original line number Diff line number Diff line
@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
restart:
		lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
		spin_lock(lockp);
		nf_conntrack_lock(lockp);
		if (cb->args[0] >= net->ct.htable_size) {
			spin_unlock(lockp);
			goto out;
Loading