Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1fbbe1a8 authored by David S. Miller
Browse files

Merge branch 'sock-lockdep-tightening'



Hannes Frederic Sowa says:

====================
sock: lockdep tightening

First patch is from Eric Dumazet and improves lockdep accuracy for
socket locks. After that, second patch introduces lockdep_sock_is_held
and uses it. Final patch reverts and reworks the lockdep fix from Daniel
in the filter code, as we now have tighter lockdep support.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 85017869 8ced425e
Loading
Loading
Loading
Loading
+9 −5
Original line number Original line Diff line number Diff line
@@ -622,8 +622,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte


	/* Re-attach the filter to persist device */
	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
	if (!skip_filter && (tun->filter_attached == true)) {
		err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
		lock_sock(tfile->socket.sk);
					 lockdep_rtnl_is_held());
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (!err)
		if (!err)
			goto out;
			goto out;
	}
	}
@@ -1824,7 +1825,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n)


	for (i = 0; i < n; i++) {
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile = rtnl_dereference(tun->tfiles[i]);
		__sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}
	}


	tun->filter_attached = false;
	tun->filter_attached = false;
@@ -1837,8 +1840,9 @@ static int tun_attach_filter(struct tun_struct *tun)


	for (i = 0; i < tun->numqueues; i++) {
	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile = rtnl_dereference(tun->tfiles[i]);
		ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
		lock_sock(tfile->socket.sk);
					 lockdep_rtnl_is_held());
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
		if (ret) {
			tun_detach_filter(tun, i);
			tun_detach_filter(tun, i);
			return ret;
			return ret;
+0 −4
Original line number Original line Diff line number Diff line
@@ -465,14 +465,10 @@ int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
void bpf_prog_destroy(struct bpf_prog *fp);
void bpf_prog_destroy(struct bpf_prog *fp);


int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
		       bool locked);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_detach_filter(struct sock *sk);
int __sk_detach_filter(struct sock *sk, bool locked);

int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);
		  unsigned int len);


+16 −3
Original line number Original line Diff line number Diff line
@@ -1333,7 +1333,12 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)


static inline void sock_release_ownership(struct sock *sk)
static inline void sock_release_ownership(struct sock *sk)
{
{
	if (sk->sk_lock.owned) {
		sk->sk_lock.owned = 0;
		sk->sk_lock.owned = 0;

		/* The sk_lock has mutex_unlock() semantics: */
		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	}
}
}


/*
/*
@@ -1355,6 +1360,14 @@ do { \
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)
} while (0)


static bool lockdep_sock_is_held(const struct sock *csk)
{
	struct sock *sk = (struct sock *)csk;

	return lockdep_is_held(&sk->sk_lock) ||
	       lockdep_is_held(&sk->sk_lock.slock);
}

void lock_sock_nested(struct sock *sk, int subclass);
void lock_sock_nested(struct sock *sk, int subclass);


static inline void lock_sock(struct sock *sk)
static inline void lock_sock(struct sock *sk)
@@ -1593,8 +1606,8 @@ static inline void sk_rethink_txhash(struct sock *sk)
static inline struct dst_entry *
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
__sk_dst_get(struct sock *sk)
{
{
	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
	return rcu_dereference_check(sk->sk_dst_cache,
						       lockdep_is_held(&sk->sk_lock.slock));
				     lockdep_sock_is_held(sk));
}
}


static inline struct dst_entry *
static inline struct dst_entry *
+13 −22
Original line number Original line Diff line number Diff line
@@ -1149,8 +1149,7 @@ void bpf_prog_destroy(struct bpf_prog *fp)
}
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);
EXPORT_SYMBOL_GPL(bpf_prog_destroy);


static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
			    bool locked)
{
{
	struct sk_filter *fp, *old_fp;
	struct sk_filter *fp, *old_fp;


@@ -1166,8 +1165,10 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk,
		return -ENOMEM;
		return -ENOMEM;
	}
	}


	old_fp = rcu_dereference_protected(sk->sk_filter, locked);
	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
		sk_filter_uncharge(sk, old_fp);


@@ -1246,8 +1247,7 @@ struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
 * occurs or there is insufficient memory for the filter a negative
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 * errno code is returned. On success the return is zero.
 */
 */
int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
		       bool locked)
{
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;
	int err;
@@ -1255,7 +1255,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,
	if (IS_ERR(prog))
	if (IS_ERR(prog))
		return PTR_ERR(prog);
		return PTR_ERR(prog);


	err = __sk_attach_prog(prog, sk, locked);
	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
	if (err < 0) {
		__bpf_prog_release(prog);
		__bpf_prog_release(prog);
		return err;
		return err;
@@ -1263,12 +1263,7 @@ int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk,


	return 0;
	return 0;
}
}
EXPORT_SYMBOL_GPL(__sk_attach_filter);
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk));
}


int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
{
@@ -1314,7 +1309,7 @@ int sk_attach_bpf(u32 ufd, struct sock *sk)
	if (IS_ERR(prog))
	if (IS_ERR(prog))
		return PTR_ERR(prog);
		return PTR_ERR(prog);


	err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk));
	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
	if (err < 0) {
		bpf_prog_put(prog);
		bpf_prog_put(prog);
		return err;
		return err;
@@ -2255,7 +2250,7 @@ static int __init register_sk_filter_ops(void)
}
}
late_initcall(register_sk_filter_ops);
late_initcall(register_sk_filter_ops);


int __sk_detach_filter(struct sock *sk, bool locked)
int sk_detach_filter(struct sock *sk)
{
{
	int ret = -ENOENT;
	int ret = -ENOENT;
	struct sk_filter *filter;
	struct sk_filter *filter;
@@ -2263,7 +2258,8 @@ int __sk_detach_filter(struct sock *sk, bool locked)
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;
		return -EPERM;


	filter = rcu_dereference_protected(sk->sk_filter, locked);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		sk_filter_uncharge(sk, filter);
@@ -2272,12 +2268,7 @@ int __sk_detach_filter(struct sock *sk, bool locked)


	return ret;
	return ret;
}
}
EXPORT_SYMBOL_GPL(__sk_detach_filter);
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_detach_filter(struct sock *sk)
{
	return __sk_detach_filter(sk, sock_owned_by_user(sk));
}


int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
		  unsigned int len)
@@ -2288,7 +2279,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,


	lock_sock(sk);
	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
					   lockdep_sock_is_held(sk));
	if (!filter)
	if (!filter)
		goto out;
		goto out;


+0 −5
Original line number Original line Diff line number Diff line
@@ -2483,11 +2483,6 @@ EXPORT_SYMBOL(lock_sock_nested);


void release_sock(struct sock *sk)
void release_sock(struct sock *sk)
{
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
	if (sk->sk_backlog.tail)
		__release_sock(sk);
		__release_sock(sk);
Loading