@@ -2019,7 +2019,8 @@ struct tcp_request_sock_ops {
#endif
void (*init_req)(struct request_sock *req,
struct sock *sk_listener,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool syncookie_req);
#ifdef CONFIG_SYN_COOKIES
__u32 (*cookie_init_seq)(const struct sk_buff *skb,
__u16 *mss);
@@ -6697,7 +6697,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
/* Note: tcp_v6_init_req() might override ir_iif for link locals */
inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
- af_ops->init_req(req, sk, skb);
+ af_ops->init_req(req, sk, skb, want_cookie);
if (IS_ENABLED(CONFIG_MPTCP) && want_cookie)
tcp_rsk(req)->is_mptcp = 0;
@@ -1421,7 +1421,8 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
static void tcp_v4_init_req(struct request_sock *req,
struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool want_cookie)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct net *net = sock_net(sk_listener);
@@ -793,7 +793,8 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
static void tcp_v6_init_req(struct request_sock *req,
struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool want_cookie)
{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
struct inet_request_sock *ireq = inet_rsk(req);
@@ -128,7 +128,8 @@ static int __subflow_check_options(const struct mptcp_options_received *mp_opt,
static void subflow_init_req(struct request_sock *req,
const struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool want_cookie)
{
struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
@@ -176,25 +177,27 @@ static void subflow_init_req(struct request_sock *req,
static void subflow_v4_init_req(struct request_sock *req,
struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool want_cookie)
{
tcp_rsk(req)->is_mptcp = 1;
- tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb);
+ tcp_request_sock_ipv4_ops.init_req(req, sk_listener, skb, want_cookie);
- subflow_init_req(req, sk_listener, skb);
+ subflow_init_req(req, sk_listener, skb, want_cookie);
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static void subflow_v6_init_req(struct request_sock *req,
struct sock *sk_listener,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool want_cookie)
{
tcp_rsk(req)->is_mptcp = 1;
- tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb);
+ tcp_request_sock_ipv6_ops.init_req(req, sk_listener, skb, want_cookie);
- subflow_init_req(req, sk_listener, skb);
+ subflow_init_req(req, sk_listener, skb, want_cookie);
}
#endif
In the MPTCP case, we want to know whether we should store a new token id
or only try best-effort (the syncookie case). This allows the MPTCP core
to detect when it should elide the storage of the generated MPTCP token.

Signed-off-by: Florian Westphal <fw@strlen.de>
---
This isn't nice either; it's useless from the TCP point of view.

One alternative would be to add a bit in the MPTCP request socket and use
that instead.

Another alternative would be to store the token normally, but then toss it
again as soon as the request sk is discarded again.

Let me know if I should evaluate a different approach.

 include/net/tcp.h    |  3 ++-
 net/ipv4/tcp_input.c |  2 +-
 net/ipv4/tcp_ipv4.c  |  3 ++-
 net/ipv6/tcp_ipv6.c  |  3 ++-
 net/mptcp/subflow.c  | 17 ++++++++++-------
 5 files changed, 17 insertions(+), 11 deletions(-)