Skip to content
This repository has been archived by the owner on Apr 18, 2024. It is now read-only.

Commit

Permalink
mptcp: Cleanup mptcp_init_buffer_space
Browse files Browse the repository at this point in the history
We can make it static by moving it to mptcp_ctrl.c

Signed-off-by: Christoph Paasch <[email protected]>
Signed-off-by: Matthieu Baerts <[email protected]>
  • Loading branch information
cpaasch authored and matttbe committed Feb 9, 2018
1 parent 8a1051d commit 3957d58
Show file tree
Hide file tree
Showing 3 changed files with 58 additions and 60 deletions.
2 changes: 0 additions & 2 deletions include/net/mptcp.h
Original file line number Diff line number Diff line change
Expand Up @@ -866,7 +866,6 @@ void mptcp_reqsk_destructor(struct request_sock *req);
void mptcp_connect_init(struct sock *sk);
void mptcp_sub_force_close(struct sock *sk);
int mptcp_sub_len_remove_addr_align(u16 bitfield);
void mptcp_init_buffer_space(struct sock *sk);
void mptcp_join_reqsk_init(const struct mptcp_cb *mpcb,
const struct request_sock *req,
struct sk_buff *skb);
Expand Down Expand Up @@ -1330,7 +1329,6 @@ static inline bool mptcp_can_new_subflow(const struct sock *meta_sk)

/* TCP and MPTCP mpc flag-depending functions */
u16 mptcp_select_window(struct sock *sk);
void mptcp_init_buffer_space(struct sock *sk);
void mptcp_tcp_set_rto(struct sock *sk);

/* TCP and MPTCP flag-depending functions */
Expand Down
58 changes: 58 additions & 0 deletions net/mptcp/mptcp_ctrl.c
Original file line number Diff line number Diff line change
Expand Up @@ -993,6 +993,64 @@ int mptcp_backlog_rcv(struct sock *meta_sk, struct sk_buff *skb)
return ret;
}

/* mptcp_init_buffer_space - initialize rcv/snd buffer accounting for a
 * subflow and propagate it to the MPTCP meta-socket.
 *
 * Runs the regular TCP buffer-space setup on the subflow first.  For the
 * master subflow the meta-level autotuning state is simply seeded from the
 * subflow (single-subflow connections behave like plain TCP).  For every
 * additional subflow the meta buffers are grown additively, capped by the
 * tcp_rmem/tcp_wmem sysctl maxima, unless the user pinned the buffer sizes
 * via SO_RCVBUF/SO_SNDBUF (SOCK_*BUF_LOCK).
 */
static void mptcp_init_buffer_space(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sock *meta_sk = mptcp_meta_sk(sk);
	struct tcp_sock *meta_tp = tcp_sk(meta_sk);
	int new_size;

	tcp_init_buffer_space(sk);

	if (is_master_tp(tp)) {
		/* First subflow: mirror its state onto the meta-socket and
		 * start meta-level receive autotuning from the current
		 * window.  User-locks were already respected by
		 * tcp_init_buffer_space() on the subflow.
		 */
		meta_tp->rcvq_space.space = meta_tp->rcv_wnd;
		tcp_mstamp_refresh(meta_tp);
		meta_tp->rcvq_space.time = meta_tp->tcp_mstamp;
		meta_tp->rcvq_space.seq = meta_tp->copied_seq;

		meta_tp->window_clamp = tp->window_clamp;
		meta_tp->rcv_ssthresh = tp->rcv_ssthresh;
		meta_sk->sk_rcvbuf = sk->sk_rcvbuf;
		meta_sk->sk_sndbuf = sk->sk_sndbuf;

		return;
	}

	if (!(meta_sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		/* Grow the meta receive buffer by the new subflow's share so
		 * the subflow can carry traffic right away; autotuning takes
		 * it further later.  Capped at sysctl tcp_rmem[2].
		 */
		new_size = min(meta_sk->sk_rcvbuf + sk->sk_rcvbuf,
			       sock_net(meta_sk)->ipv4.sysctl_tcp_rmem[2]);
		if (new_size > meta_sk->sk_rcvbuf) {
			meta_tp->window_clamp += tp->window_clamp;
			meta_tp->rcv_ssthresh += tp->rcv_ssthresh;
			meta_sk->sk_rcvbuf = new_size;
		}
	}

	if (meta_sk->sk_userlocks & SOCK_SNDBUF_LOCK)
		return;

	/* Same additive growth for the send side, capped at tcp_wmem[2];
	 * wake up writers if more space became available.
	 */
	new_size = min(meta_sk->sk_sndbuf + sk->sk_sndbuf,
		       sock_net(meta_sk)->ipv4.sysctl_tcp_wmem[2]);
	if (new_size > meta_sk->sk_sndbuf) {
		meta_sk->sk_sndbuf = new_size;
		meta_sk->sk_write_space(meta_sk);
	}
}

struct lock_class_key meta_key;
char *meta_key_name = "sk_lock-AF_INET-MPTCP";
struct lock_class_key meta_slock_key;
Expand Down
58 changes: 0 additions & 58 deletions net/mptcp/mptcp_input.c
Original file line number Diff line number Diff line change
Expand Up @@ -2408,64 +2408,6 @@ bool mptcp_should_expand_sndbuf(const struct sock *sk)
return false;
}

/* mptcp_init_buffer_space - set up buffer space for a subflow and its
 * MPTCP meta-socket.
 *
 * Calls tcp_init_buffer_space() on the subflow, then either seeds the
 * meta-socket's buffers and receive-autotuning state from the master
 * subflow, or — for additional subflows — additively enlarges the meta
 * rcv/snd buffers up to the tcp_rmem[2]/tcp_wmem[2] sysctl limits,
 * honouring user-locked buffer sizes (SOCK_RCVBUF_LOCK/SOCK_SNDBUF_LOCK).
 *
 * NOTE(review): assumes sk is an established MPTCP subflow so that
 * mptcp_meta_sk(sk) is valid — confirm against callers.
 */
void mptcp_init_buffer_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sock *meta_sk = mptcp_meta_sk(sk);
struct tcp_sock *meta_tp = tcp_sk(meta_sk);
int space;

tcp_init_buffer_space(sk);

if (is_master_tp(tp)) {
/* Master (first) subflow: start meta receive autotuning from the
 * current window/timestamp/copied_seq of the meta-socket.
 */
meta_tp->rcvq_space.space = meta_tp->rcv_wnd;
tcp_mstamp_refresh(meta_tp);
meta_tp->rcvq_space.time = meta_tp->tcp_mstamp;
meta_tp->rcvq_space.seq = meta_tp->copied_seq;

/* If there is only one subflow, we just use regular TCP
 * autotuning. User-locks are handled already by
 * tcp_init_buffer_space
 */
meta_tp->window_clamp = tp->window_clamp;
meta_tp->rcv_ssthresh = tp->rcv_ssthresh;
meta_sk->sk_rcvbuf = sk->sk_rcvbuf;
meta_sk->sk_sndbuf = sk->sk_sndbuf;

return;
}

/* Receive buffer pinned by the user: leave it alone, but still
 * consider growing the send side below.
 */
if (meta_sk->sk_userlocks & SOCK_RCVBUF_LOCK)
goto snd_buf;

/* Adding a new subflow to the rcv-buffer space. We make a simple
 * addition, to give some space to allow traffic on the new subflow.
 * Autotuning will increase it further later on.
 */
space = min(meta_sk->sk_rcvbuf + sk->sk_rcvbuf,
sock_net(meta_sk)->ipv4.sysctl_tcp_rmem[2]);
if (space > meta_sk->sk_rcvbuf) {
/* Grow clamp and ssthresh together with the buffer so the larger
 * window can actually be advertised.
 */
meta_tp->window_clamp += tp->window_clamp;
meta_tp->rcv_ssthresh += tp->rcv_ssthresh;
meta_sk->sk_rcvbuf = space;
}

snd_buf:
if (meta_sk->sk_userlocks & SOCK_SNDBUF_LOCK)
return;

/* Adding a new subflow to the send-buffer space. We make a simple
 * addition, to give some space to allow traffic on the new subflow.
 * Autotuning will increase it further later on.
 */
space = min(meta_sk->sk_sndbuf + sk->sk_sndbuf,
sock_net(meta_sk)->ipv4.sysctl_tcp_wmem[2]);
if (space > meta_sk->sk_sndbuf) {
meta_sk->sk_sndbuf = space;
/* Wake up any writer waiting for send-buffer space. */
meta_sk->sk_write_space(meta_sk);
}
}

void mptcp_tcp_set_rto(struct sock *sk)
{
tcp_set_rto(sk);
Expand Down

0 comments on commit 3957d58

Please sign in to comment.