--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1655,6 +1655,13 @@ union bpf_attr {
* A 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock *sk)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock** context.
+ * Return
+ * A 8-byte long non-decreasing number.
+ *
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
@@ -4463,6 +4470,7 @@ struct sk_reuseport_md {
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
__u8 migration; /* Migration type */
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Current listening socket */
};
#define BPF_TAG_SIZE 8
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4631,6 +4631,18 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_1(bpf_get_socket_pointer_cookie, struct sock *, sk)
+{
+ return __sock_gen_cookie(sk);
+}
+
+static const struct bpf_func_proto bpf_get_socket_pointer_cookie_proto = {
+ .func = bpf_get_socket_pointer_cookie,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_SOCKET,
+};
+
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
return __sock_gen_cookie(ctx->sk);
@@ -9989,6 +10001,8 @@ sk_reuseport_func_proto(enum bpf_func_id func_id,
return &sk_reuseport_load_bytes_proto;
case BPF_FUNC_skb_load_bytes_relative:
return &sk_reuseport_load_bytes_relative_proto;
+ case BPF_FUNC_get_socket_cookie:
+ return &bpf_get_socket_pointer_cookie_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -10022,6 +10036,10 @@ sk_reuseport_is_valid_access(int off, int size,
return prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE &&
size == sizeof(__u8);
+ case offsetof(struct sk_reuseport_md, sk):
+ info->reg_type = PTR_TO_SOCKET;
+ return size == sizeof(__u64);
+
/* Fields that allow narrowing */
case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
if (size < sizeof_field(struct sk_buff, protocol))
@@ -10098,6 +10116,10 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct sk_reuseport_md, migration):
SK_REUSEPORT_LOAD_FIELD(migration);
break;
+
+ case offsetof(struct sk_reuseport_md, sk):
+ SK_REUSEPORT_LOAD_FIELD(sk);
+ break;
}
return insn - insn_buf;
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1655,6 +1655,13 @@ union bpf_attr {
* A 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock *sk)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock** context.
+ * Return
+ * A 8-byte long non-decreasing number.
+ *
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
@@ -4463,6 +4470,7 @@ struct sk_reuseport_md {
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
__u8 migration; /* Migration type */
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Current listening socket */
};
#define BPF_TAG_SIZE 8
We will call sock_reuseport.prog for socket migration in the next commit,
so the eBPF program has to know which listener is closing in order to
select the new listener.

Currently, we can get a unique ID for each listener from userspace by
calling bpf_map_lookup_elem() on a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map.

This patch makes the sk pointer available in sk_reuseport_md so that the
eBPF program can get that ID with BPF_FUNC_get_socket_cookie() directly.

Link: https://lore.kernel.org/netdev/20201119001154.kapwihc2plp4f7zc@kafai-mbp.dhcp.thefacebook.com/
Suggested-by: Martin KaFai Lau <kafai@fb.com>
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
---
 include/uapi/linux/bpf.h       |  8 ++++++++
 net/core/filter.c              | 22 ++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  8 ++++++++
 3 files changed, 38 insertions(+)
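
For illustration, below is a minimal sketch (not part of this patch) of how an
SK_REUSEPORT program could combine the new reuse_md->sk field with
bpf_get_socket_cookie(). The map listener_ids, its layout, the program name,
and the policy are made-up assumptions; only reuse_md->sk and the cookie
helper for BPF_PROG_TYPE_SK_REUSEPORT are what this patch wires up.

/* Illustrative only: an SK_REUSEPORT program using the new sk field.
 * The map name, its layout, and the overall policy are invented for the
 * example; only reuse_md->sk and bpf_get_socket_cookie() come from this
 * patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 256);
	__type(key, __u64);	/* socket cookie of a listener */
	__type(value, __u32);	/* value userspace associated with it */
} listener_ids SEC(".maps");

SEC("sk_reuseport")
int reuseport_by_cookie(struct sk_reuseport_md *reuse_md)
{
	__u32 *val;
	__u64 cookie;

	/* reuse_md->sk is tracked by the verifier as PTR_TO_SOCKET, so it
	 * can be passed straight to bpf_get_socket_cookie(), which this
	 * patch makes callable from BPF_PROG_TYPE_SK_REUSEPORT.
	 */
	cookie = bpf_get_socket_cookie(reuse_md->sk);

	val = bpf_map_lookup_elem(&listener_ids, &cookie);
	if (!val)
		return SK_DROP;

	/* ... act on *val, e.g. feed an index to bpf_sk_select_reuseport() ... */
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";

Before this change, the program itself could not obtain the cookie; userspace
had to look it up via bpf_map_lookup_elem() on the
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map and push it into the program through
another map.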