diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8f1927d56d80..1ab36a72d93e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -934,6 +934,14 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
 
 	spin_lock(&queue->rskq_lock);
 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
+		struct tcp_sock *tp = tcp_sk(sk);
+
+		/* In case of MPTCP, two locks may have been taken, one
+		 * on the meta, the other on master_sk.
+		 */
+		if (mptcp(tp) && tp->mpcb && tp->mpcb->master_sk)
+			bh_unlock_sock(tp->mpcb->master_sk);
+
 		inet_child_forget(sk, req, child);
 		child = NULL;
 	} else {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bf212158ff2e..e05c800bef60 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6860,9 +6860,11 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		/* Add the child socket directly into the accept queue */
 		if (!inet_csk_reqsk_queue_add(sk, req, meta_sk)) {
 			reqsk_fastopen_remove(fastopen_sk, req, false);
-			bh_unlock_sock(fastopen_sk);
-			if (meta_sk != fastopen_sk)
-				bh_unlock_sock(meta_sk);
+			/* In the case of MPTCP, on failure, the master subflow
+			 * socket (== fastopen_sk) will already have been unlocked
+			 * by the failed call to inet_csk_reqsk_queue_add().
+			 */
+			bh_unlock_sock(meta_sk);
 			sock_put(fastopen_sk);
 			goto drop_and_free;
 		}
diff --git a/net/mptcp/mptcp_ctrl.c b/net/mptcp/mptcp_ctrl.c
index d6b6ac5dfe0f..1de63d1af310 100644
--- a/net/mptcp/mptcp_ctrl.c
+++ b/net/mptcp/mptcp_ctrl.c
@@ -2116,9 +2116,6 @@ void mptcp_disconnect(struct sock *meta_sk)
 	mptcp_for_each_sub_safe(meta_tp->mpcb, mptcp, tmp) {
 		struct sock *subsk = mptcp_to_sock(mptcp);
 
-		if (spin_is_locked(&subsk->sk_lock.slock))
-			bh_unlock_sock(subsk);
-
 		tcp_sk(subsk)->tcp_disconnect = 1;
 
 		meta_sk->sk_prot->disconnect(subsk, O_NONBLOCK);
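
The net effect of the first two hunks is a change of unlock ownership on the failure path: when the listener is no longer in TCP_LISTEN, inet_csk_reqsk_queue_add() now drops the master-subflow lock itself, so tcp_conn_request() releases only the meta lock and no socket lock is unlocked twice. The following is a minimal userspace sketch of that contract, not kernel code: pthread spinlocks stand in for the bh socket locks, and the names meta_lock, master_lock, queue_add() and conn_request() are illustrative stand-ins for meta_sk/master_sk, inet_csk_reqsk_queue_add() and tcp_conn_request().

/*
 * Toy model of the unlock contract established by this patch (assumed
 * simplification, not kernel code): on failure the callee releases the
 * master lock, so the caller releases only the meta lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_spinlock_t meta_lock;	/* stands in for meta_sk's lock   */
static pthread_spinlock_t master_lock;	/* stands in for master_sk's lock */

/* Models the patched inet_csk_reqsk_queue_add(): on failure it drops the
 * master lock on behalf of the caller (first hunk above). */
static bool queue_add(bool listener_still_valid)
{
	if (!listener_still_valid) {
		pthread_spin_unlock(&master_lock);	/* callee unlocks master */
		return false;
	}
	return true;
}

/* Models the patched error path in tcp_conn_request(): after a failed
 * queue_add() only the meta lock is still held, so only it is released. */
static void conn_request(bool listener_still_valid)
{
	pthread_spin_lock(&meta_lock);
	pthread_spin_lock(&master_lock);

	if (!queue_add(listener_still_valid)) {
		/* master_lock was already dropped by queue_add(); dropping it
		 * again here would be the double unlock the patch removes. */
		pthread_spin_unlock(&meta_lock);
		printf("add failed: released meta only\n");
		return;
	}

	pthread_spin_unlock(&master_lock);
	pthread_spin_unlock(&meta_lock);
	printf("add succeeded: released both\n");
}

int main(void)
{
	pthread_spin_init(&meta_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&master_lock, PTHREAD_PROCESS_PRIVATE);

	conn_request(true);	/* success path */
	conn_request(false);	/* failure path: no double unlock */

	pthread_spin_destroy(&master_lock);
	pthread_spin_destroy(&meta_lock);
	return 0;
}

With the callee owning the master-subflow unlock on failure, the spin_is_locked() workaround removed in the mptcp_disconnect() hunk is no longer needed, since subflow locks are never left half-released by the fastopen error path.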