@@ -1382,6 +1382,11 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
spin_unlock_bh(&sk->sk_lock.slock);
}
+static inline bool lockdep_sock_is_held(struct sock *sk)
+{
+	return lockdep_is_held(&sk->sk_lock) ||
+	       lockdep_is_held(&sk->sk_lock.slock);
+}
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
@@ -1166,7 +1166,7 @@ static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
}
old_fp = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_filter, fp);
if (old_fp)
@@ -2259,7 +2259,7 @@ int sk_detach_filter(struct sock *sk)
return -EPERM;
filter = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (filter) {
RCU_INIT_POINTER(sk->sk_filter, NULL);
sk_filter_uncharge(sk, filter);
@@ -2279,7 +2279,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
lock_sock(sk);
filter = rcu_dereference_protected(sk->sk_filter,
- sock_owned_by_user(sk));
+ lockdep_sock_is_held(sk));
if (!filter)
goto out;
lockdep_sock_is_held() verifies that the current context actually holds the socket lock — either the owner lock or the underlying slock. sock_owned_by_user() merely checks whether *some* user context holds the socket, regardless of who the caller is, so using it as the rcu_dereference_protected() condition could lead to non-deterministic lock checks. Reported-by: Sasha Levin <sasha.levin@oracle.com> Cc: Daniel Borkmann <daniel@iogearbox.net> Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com> Cc: Michal Kubecek <mkubecek@suse.cz> Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org> --- include/net/sock.h | 5 +++++ net/core/filter.c | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-)