@@ -31,7 +31,7 @@ struct sock_reuseport {
 extern int reuseport_alloc(struct sock *sk, bool bind_inany);
 extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
 			      bool bind_inany);
-extern void reuseport_detach_sock(struct sock *sk);
+extern struct sock *reuseport_detach_sock(struct sock *sk);
 extern struct sock *reuseport_select_sock(struct sock *sk,
 					  u32 hash,
 					  struct sk_buff *skb,
@@ -184,9 +184,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
 }
 EXPORT_SYMBOL(reuseport_add_sock);
 
-void reuseport_detach_sock(struct sock *sk)
+struct sock *reuseport_detach_sock(struct sock *sk)
 {
 	struct sock_reuseport *reuse;
+	struct bpf_prog *prog;
+	struct sock *nsk = NULL;
 	int i;
 
 	spin_lock_bh(&reuseport_lock);
@@ -215,17 +217,25 @@ void reuseport_detach_sock(struct sock *sk)
 		reuse->num_socks--;
 		reuse->socks[i] = reuse->socks[reuse->num_socks];
+		prog = rcu_dereference_protected(reuse->prog,
+						 lockdep_is_held(&reuseport_lock));
+
+		if (sk->sk_protocol == IPPROTO_TCP) {
+			if (reuse->num_socks && !prog)
+				nsk = i == reuse->num_socks ? reuse->socks[i - 1] : reuse->socks[i];
 
-		if (sk->sk_protocol == IPPROTO_TCP)
 			reuse->num_closed_socks++;
-		else
+		} else {
 			rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
+		}
 	}
 
 	if (reuse->num_socks + reuse->num_closed_socks == 0)
 		call_rcu(&reuse->rcu, reuseport_free_rcu);
 
 	spin_unlock_bh(&reuseport_lock);
+
+	return nsk;
 }
 EXPORT_SYMBOL(reuseport_detach_sock);
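The selection logic above is compact, so it is worth unpacking. The closing socket at socks[i] is overwritten by the last live socket (socks[num_socks] after the decrement). If i now equals num_socks, the swap was a self-assignment and socks[i] still holds the closing socket itself, so the neighbour at i - 1 is chosen instead; otherwise the socket just swapped into slot i is chosen. Migration is only attempted while at least one live listener remains and no BPF program is attached to the group. The standalone sketch below mirrors that index arithmetic with a plain int array standing in for reuse->socks[]; pick_migration_target() and the sample values are illustrative, not kernel code.

/* Standalone sketch of the migration-target selection above.  An int
 * array stands in for reuse->socks[]; returns -1 when no live listener
 * is left (nsk stays NULL in the kernel code).
 */
#include <assert.h>
#include <stddef.h>

static int pick_migration_target(int socks[], size_t *num_socks, size_t i)
{
	/* Mirror "reuse->num_socks--; reuse->socks[i] = reuse->socks[reuse->num_socks];" */
	(*num_socks)--;
	socks[i] = socks[*num_socks];

	if (*num_socks == 0)
		return -1;	/* no live listener remains */

	/* Mirror "nsk = i == reuse->num_socks ? reuse->socks[i - 1] : reuse->socks[i]".
	 * When i was the last live slot, the swap was a self-assignment and
	 * socks[i] still holds the closing socket, so pick the predecessor.
	 */
	return i == *num_socks ? socks[i - 1] : socks[i];
}

int main(void)
{
	int a[] = { 10, 20, 30 };
	size_t n = 3;

	/* Close the last listener (i == 2): slot 2 still holds the closing
	 * socket after the self-swap, so its predecessor 20 is picked. */
	assert(pick_migration_target(a, &n, 2) == 20);

	int b[] = { 10, 20, 30 };
	size_t m = 3;

	/* Close the first listener (i == 0): 30 is swapped into slot 0 and
	 * picked as the migration target. */
	assert(pick_migration_target(b, &m, 0) == 30);
	return 0;
}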
@@ -681,6 +681,7 @@ void inet_unhash(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
 	struct inet_listen_hashbucket *ilb = NULL;
+	struct sock *nsk;
 	spinlock_t *lock;
 
 	if (sk_unhashed(sk))
@@ -696,8 +697,12 @@ void inet_unhash(struct sock *sk)
 	if (sk_unhashed(sk))
 		goto unlock;
 
-	if (rcu_access_pointer(sk->sk_reuseport_cb))
-		reuseport_detach_sock(sk);
+	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+		nsk = reuseport_detach_sock(sk);
+		if (nsk)
+			inet_csk_reqsk_queue_migrate(sk, nsk);
+	}
+
 	if (ilb) {
 		inet_unhash2(hashinfo, sk);
 		ilb->count--;
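With inet_unhash() wired up this way, closing one listener in a SO_REUSEPORT group hands its not-yet-accepted children to a surviving listener via inet_csk_reqsk_queue_migrate() rather than leaving them to be aborted. The userspace sketch below shows the scenario this targets; it uses only standard socket calls, and the port number and two-listener setup are illustrative.

/* Two listeners share a port via SO_REUSEPORT; closing one of them
 * triggers inet_unhash() -> reuseport_detach_sock() in the kernel.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int reuseport_listener(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = reuseport_listener(8080);
	int b = reuseport_listener(8080);

	if (a < 0 || b < 0) {
		perror("listener");
		return 1;
	}

	/* Incoming connections are distributed between a and b.  With the
	 * patch applied, closing b migrates the requests still queued on
	 * it to the surviving listener a.
	 */
	close(b);

	/* ... keep accepting on the surviving listener ... */
	close(a);
	return 0;
}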