diff mbox

[v2,2/6] cgroup: add support for eBPF programs

Message ID 1472070263-29988-3-git-send-email-daniel@zonque.org
State Changes Requested, archived
Delegated to: David Miller
Headers show

Commit Message

Daniel Mack Aug. 24, 2016, 8:24 p.m. UTC
This patch adds two sets of eBPF program pointers to struct cgroup:
one for programs that are directly pinned to a cgroup, and one for
those that are effective for it.

To illustrate the logic behind that, assume the following example
cgroup hierarchy.

  A - B - C
        \ D - E

If only B has a program attached, it will be effective for B, C, D
and E. If D then attaches a program itself, that will be effective for
both D and E, and the program in B will only affect B and C. Only one
program of a given type is effective for a cgroup.

Attaching and detaching programs will be done through the bpf(2)
syscall. For now, ingress and egress inet socket filtering are the
only supported use-cases.

Signed-off-by: Daniel Mack <daniel@zonque.org>
---
 include/linux/bpf-cgroup.h  |  70 +++++++++++++++++++
 include/linux/cgroup-defs.h |   4 ++
 init/Kconfig                |  12 ++++
 kernel/bpf/Makefile         |   1 +
 kernel/bpf/cgroup.c         | 159 ++++++++++++++++++++++++++++++++++++++++++++
 kernel/cgroup.c             |  18 +++++
 6 files changed, 264 insertions(+)
 create mode 100644 include/linux/bpf-cgroup.h
 create mode 100644 kernel/bpf/cgroup.c

Comments

Tejun Heo Aug. 24, 2016, 9:54 p.m. UTC | #1
Hello, Daniel.

On Wed, Aug 24, 2016 at 10:24:19PM +0200, Daniel Mack wrote:
> +void cgroup_bpf_free(struct cgroup *cgrp)
> +{
> +	unsigned int type;
> +
> +	rcu_read_lock();
> +
> +	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
> +		if (!cgrp->bpf.prog[type])
> +			continue;
> +
> +		bpf_prog_put(cgrp->bpf.prog[type]);
> +		static_branch_dec(&cgroup_bpf_enabled_key);
> +	}
> +
> +	rcu_read_unlock();

These rcu locking seem suspicious to me.  RCU locking on writer side
is usually bogus.  We sometimes do it to work around locking
assertions in accessors but it's a better idea to make the assertions
better in those cases - e.g. sth like assert_mylock_or_rcu_locked().

> +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
> +{
> +	unsigned int type;
> +
> +	rcu_read_lock();

Ditto.

> +	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
> +		rcu_assign_pointer(cgrp->bpf.prog_effective[type],
> +			rcu_dereference(parent->bpf.prog_effective[type]));
> +
> +	rcu_read_unlock();
> +}
...
> +void __cgroup_bpf_update(struct cgroup *cgrp,
> +			 struct cgroup *parent,
> +			 struct bpf_prog *prog,
> +			 enum bpf_attach_type type)
> +{
> +	struct bpf_prog *old_prog, *effective;
> +	struct cgroup_subsys_state *pos;
> +
> +	rcu_read_lock();

Ditto.

> +	old_prog = xchg(cgrp->bpf.prog + type, prog);
> +	if (old_prog) {
> +		bpf_prog_put(old_prog);
> +		static_branch_dec(&cgroup_bpf_enabled_key);
> +	}
> +
> +	if (prog)
> +		static_branch_inc(&cgroup_bpf_enabled_key);

Minor but probably better to inc first and then dec so that you can
avoid unnecessary enabled -> disabled -> enabled sequence.

> +	effective = (!prog && parent) ?
> +		rcu_dereference(parent->bpf.prog_effective[type]) : prog;

If this is what's triggering rcu warnings, there's an accessor to use
in these situations.

> +	rcu_read_unlock();
> +
> +	css_for_each_descendant_pre(pos, &cgrp->self) {

On the other hand, this walk actually requires rcu read locking unless
you're holding cgroup_mutex.

Thanks.
Daniel Mack Aug. 24, 2016, 10:30 p.m. UTC | #2
Hi Tejun,

On 08/24/2016 11:54 PM, Tejun Heo wrote:
> On Wed, Aug 24, 2016 at 10:24:19PM +0200, Daniel Mack wrote:
>> +void cgroup_bpf_free(struct cgroup *cgrp)
>> +{
>> +	unsigned int type;
>> +
>> +	rcu_read_lock();
>> +
>> +	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
>> +		if (!cgrp->bpf.prog[type])
>> +			continue;
>> +
>> +		bpf_prog_put(cgrp->bpf.prog[type]);
>> +		static_branch_dec(&cgroup_bpf_enabled_key);
>> +	}
>> +
>> +	rcu_read_unlock();
> 
> These rcu locking seem suspicious to me.  RCU locking on writer side
> is usually bogus.  We sometimes do it to work around locking
> assertions in accessors but it's a better idea to make the assertions
> better in those cases - e.g. sth like assert_mylock_or_rcu_locked().

Right, in this case, it is unnecessary, as the bpf.prog[] is not under RCU.

>> +void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
>> +{
>> +	unsigned int type;
>> +
>> +	rcu_read_lock();
> 
> Ditto.
> 
>> +	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
>> +		rcu_assign_pointer(cgrp->bpf.prog_effective[type],
>> +			rcu_dereference(parent->bpf.prog_effective[type]));

Okay, yes. We're under cgroup_mutex write-path protection here, so
that's unnecessary too.

>> +void __cgroup_bpf_update(struct cgroup *cgrp,
>> +			 struct cgroup *parent,
>> +			 struct bpf_prog *prog,
>> +			 enum bpf_attach_type type)
>> +{
>> +	struct bpf_prog *old_prog, *effective;
>> +	struct cgroup_subsys_state *pos;
>> +
>> +	rcu_read_lock();
> 
> Ditto.

Yes, agreed, as above.

>> +	old_prog = xchg(cgrp->bpf.prog + type, prog);
>> +	if (old_prog) {
>> +		bpf_prog_put(old_prog);
>> +		static_branch_dec(&cgroup_bpf_enabled_key);
>> +	}
>> +
>> +	if (prog)
>> +		static_branch_inc(&cgroup_bpf_enabled_key);
> 
> Minor but probably better to inc first and then dec so that you can
> avoid unnecessary enabled -> disabled -> enabled sequence.

Good point. Will fix.

>> +	rcu_read_unlock();
>> +
>> +	css_for_each_descendant_pre(pos, &cgrp->self) {
> 
> On the other hand, this walk actually requires rcu read locking unless
> you're holding cgroup_mutex.

I am - this function is always called with cgroup_mutex held through the
wrapper in kernel/cgroup.c.

Thanks a lot - will put all that changes in v3.


Daniel
kernel test robot Aug. 25, 2016, 6:56 a.m. UTC | #3
Hi Daniel,

[auto build test WARNING on net-next/master]
[also build test WARNING on v4.8-rc3 next-20160824]
[cannot apply to linus/master linux/master]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]
[Suggest to use git(>=2.9.0) format-patch --base=<commit> (or --base=auto for convenience) to record what (public, well-known) commit your patch series was built on]
[Check https://git-scm.com/docs/git-format-patch for more information]

url:    https://github.com/0day-ci/linux/commits/Daniel-Mack/Add-eBPF-hooks-for-cgroups/20160825-042759
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

   include/linux/compiler.h:230:8: sparse: attribute 'no_sanitize_address': unknown attribute
>> kernel/bpf/cgroup.c:46:17: sparse: incompatible types in comparison expression (different address spaces)
>> kernel/bpf/cgroup.c:46:17: sparse: incompatible types in comparison expression (different address spaces)
   kernel/bpf/cgroup.c:97:17: sparse: incompatible types in comparison expression (different address spaces)
   kernel/bpf/cgroup.c:147:16: sparse: incompatible types in comparison expression (different address spaces)

vim +46 kernel/bpf/cgroup.c

    30				continue;
    31	
    32			bpf_prog_put(cgrp->bpf.prog[type]);
    33			static_branch_dec(&cgroup_bpf_enabled_key);
    34		}
    35	
    36		rcu_read_unlock();
    37	}
    38	
    39	void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
    40	{
    41		unsigned int type;
    42	
    43		rcu_read_lock();
    44	
    45		for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
  > 46			rcu_assign_pointer(cgrp->bpf.prog_effective[type],
    47				rcu_dereference(parent->bpf.prog_effective[type]));
    48	
    49		rcu_read_unlock();
    50	}
    51	
    52	/**
    53	 * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
    54	 *                         propagate the change to descendants

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
diff mbox

Patch

diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
new file mode 100644
index 0000000..d85d50f
--- /dev/null
+++ b/include/linux/bpf-cgroup.h
@@ -0,0 +1,70 @@ 
+#ifndef _BPF_CGROUP_H
+#define _BPF_CGROUP_H
+
+#include <linux/bpf.h>
+#include <uapi/linux/bpf.h>
+
+struct sock;
+struct cgroup;
+struct sk_buff;
+
+#ifdef CONFIG_CGROUP_BPF
+
+extern struct static_key_false cgroup_bpf_enabled_key;
+#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+
+struct cgroup_bpf {
+	/*
+	 * Store two sets of bpf_prog pointers, one for programs that are
+	 * pinned directly to this cgroup, and one for those that are effective
+	 * when this cgroup is accessed.
+	 */
+	struct bpf_prog *prog[__MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog *prog_effective[__MAX_BPF_ATTACH_TYPE];
+};
+
+void cgroup_bpf_free(struct cgroup *cgrp);
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
+
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type);
+
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type);
+
+/* Wrapper for __cgroup_bpf_run_filter() guarded by cgroup_bpf_enabled */
+static inline int cgroup_bpf_run_filter(struct sock *sk,
+					struct sk_buff *skb,
+					enum bpf_attach_type type)
+{
+	if (cgroup_bpf_enabled)
+		return __cgroup_bpf_run_filter(sk, skb, type);
+
+	return 0;
+}
+
+#else
+
+struct cgroup_bpf {};
+static inline void cgroup_bpf_free(struct cgroup *cgrp) {}
+static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
+				      struct cgroup *parent) {}
+
+static inline int cgroup_bpf_run_filter(struct sock *sk,
+					struct sk_buff *skb,
+					enum bpf_attach_type type)
+{
+	return 0;
+}
+
+#endif /* CONFIG_CGROUP_BPF */
+
+#endif /* _BPF_CGROUP_H */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5b17de6..861b467 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -16,6 +16,7 @@ 
 #include <linux/percpu-refcount.h>
 #include <linux/percpu-rwsem.h>
 #include <linux/workqueue.h>
+#include <linux/bpf-cgroup.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -300,6 +301,9 @@  struct cgroup {
 	/* used to schedule release agent */
 	struct work_struct release_agent_work;
 
+	/* used to store eBPF programs */
+	struct cgroup_bpf bpf;
+
 	/* ids of the ancestors at each level including self */
 	int ancestor_ids[];
 };
diff --git a/init/Kconfig b/init/Kconfig
index cac3f09..5a89c83 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1144,6 +1144,18 @@  config CGROUP_PERF
 
 	  Say N if unsure.
 
+config CGROUP_BPF
+	bool "Support for eBPF programs attached to cgroups"
+	depends on BPF_SYSCALL && SOCK_CGROUP_DATA
+	help
+	  Allow attaching eBPF programs to a cgroup using the bpf(2)
+	  syscall command BPF_PROG_ATTACH.
+
+	  In which context these programs are accessed depends on the type
+	  of attachment. For instance, programs that are attached using
+	  BPF_ATTACH_TYPE_CGROUP_INET_INGRESS will be executed on the
+	  ingress path of inet sockets.
+
 config CGROUP_DEBUG
 	bool "Example controller"
 	default n
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index eed911d..b22256b 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,3 +5,4 @@  obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
+obj-$(CONFIG_CGROUP_BPF) += cgroup.o
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
new file mode 100644
index 0000000..eacd0e5a3
--- /dev/null
+++ b/kernel/bpf/cgroup.c
@@ -0,0 +1,159 @@ 
+/*
+ * Functions to manage eBPF programs attached to cgroups
+ *
+ * Copyright (C) 2016 Daniel Mack
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/atomic.h>
+#include <linux/cgroup.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/bpf-cgroup.h>
+#include <net/sock.h>
+
+DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
+EXPORT_SYMBOL(cgroup_bpf_enabled_key);
+
+void cgroup_bpf_free(struct cgroup *cgrp)
+{
+	unsigned int type;
+
+	rcu_read_lock();
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+		if (!cgrp->bpf.prog[type])
+			continue;
+
+		bpf_prog_put(cgrp->bpf.prog[type]);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+
+	rcu_read_unlock();
+}
+
+void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
+{
+	unsigned int type;
+
+	rcu_read_lock();
+
+	for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+		rcu_assign_pointer(cgrp->bpf.prog_effective[type],
+			rcu_dereference(parent->bpf.prog_effective[type]));
+
+	rcu_read_unlock();
+}
+
+/**
+ * __cgroup_bpf_update() - Update the pinned program of a cgroup, and
+ *                         propagate the change to descendants
+ * @cgrp: The cgroup which descendants to traverse
+ * @prog: A new program to pin
+ * @type: Type of pinning operation (ingress/egress)
+ *
+ * Each cgroup has two sets of bpf_prog pointers: one for programs that are
+ * pinned directly to it, and one for programs that are effective for it.
+ *
+ * If @prog is not %NULL, this function attaches a new program to the cgroup
+ * and releases the one that is currently attached, if any. @prog is then made
+ * the effective program of type @type in that cgroup.
+ *
+ * If @prog is %NULL, the currently attached program of type @type is released,
+ * and the effective program of the parent cgroup is inherited by @cgrp.
+ *
+ * Then, the descendants of @cgrp are walked and the effective program for
+ * each of them is set to the effective program of @cgrp, unless the
+ * descendant has its own program attached, in which case the subbranch is
+ * skipped. This ensures that delegated subcgroups with own programs are left
+ * untouched.
+ *
+ * Must be called with cgroup_mutex held.
+ */
+void __cgroup_bpf_update(struct cgroup *cgrp,
+			 struct cgroup *parent,
+			 struct bpf_prog *prog,
+			 enum bpf_attach_type type)
+{
+	struct bpf_prog *old_prog, *effective;
+	struct cgroup_subsys_state *pos;
+
+	rcu_read_lock();
+
+	old_prog = xchg(cgrp->bpf.prog + type, prog);
+	if (old_prog) {
+		bpf_prog_put(old_prog);
+		static_branch_dec(&cgroup_bpf_enabled_key);
+	}
+
+	if (prog)
+		static_branch_inc(&cgroup_bpf_enabled_key);
+
+	effective = (!prog && parent) ?
+		rcu_dereference(parent->bpf.prog_effective[type]) : prog;
+
+	rcu_read_unlock();
+
+	css_for_each_descendant_pre(pos, &cgrp->self) {
+		struct cgroup *desc = container_of(pos, struct cgroup, self);
+
+		/* skip the subtree if the descendant has its own program */
+		if (desc->bpf.prog[type] && desc != cgrp)
+			pos = css_rightmost_descendant(pos);
+		else
+			rcu_assign_pointer(desc->bpf.prog_effective[type],
+					   effective);
+	}
+}
+
+/**
+ * __cgroup_bpf_run_filter() - Run a program for packet filtering
+ * @sk: The socket sending or receiving traffic
+ * @skb: The skb that is being sent or received
+ * @type: The type of program to be executed
+ *
+ * If no socket is passed, or the socket is not of type INET or INET6,
+ * this function does nothing and returns 0.
+ *
+ * The program type passed in via @type must be suitable for network
+ * filtering. No further check is performed to assert that.
+ *
+ * This function will return %-EPERM if an attached program was found
+ * and if it returned != 1 during execution. In all other cases, 0 is returned.
+ */
+int __cgroup_bpf_run_filter(struct sock *sk,
+			    struct sk_buff *skb,
+			    enum bpf_attach_type type)
+{
+	struct bpf_prog *prog;
+	struct cgroup *cgrp;
+	int ret = 0;
+
+	if (!sk)
+		return 0;
+
+	if (sk->sk_family != AF_INET &&
+	    sk->sk_family != AF_INET6)
+		return 0;
+
+	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
+
+	rcu_read_lock();
+
+	prog = rcu_dereference(cgrp->bpf.prog_effective[type]);
+	if (prog) {
+		unsigned int offset = skb->data - skb_mac_header(skb);
+
+		__skb_push(skb, offset);
+		ret = bpf_prog_run_clear_cb(prog, skb) == 1 ? 0 : -EPERM;
+		__skb_pull(skb, offset);
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d1c51b7..d53d4b5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5038,6 +5038,8 @@  static void css_release_work_fn(struct work_struct *work)
 		if (cgrp->kn)
 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
 					 NULL);
+
+		cgroup_bpf_free(cgrp);
 	}
 
 	mutex_unlock(&cgroup_mutex);
@@ -5245,6 +5247,9 @@  static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (!cgroup_on_dfl(cgrp))
 		cgrp->subtree_control = cgroup_control(cgrp);
 
+	if (parent)
+		cgroup_bpf_inherit(cgrp, parent);
+
 	cgroup_propagate_control(cgrp);
 
 	/* @cgrp doesn't have dir yet so the following will only create csses */
@@ -6417,6 +6422,19 @@  static __init int cgroup_namespaces_init(void)
 }
 subsys_initcall(cgroup_namespaces_init);
 
+#ifdef CONFIG_CGROUP_BPF
+void cgroup_bpf_update(struct cgroup *cgrp,
+		       struct bpf_prog *prog,
+		       enum bpf_attach_type type)
+{
+	struct cgroup *parent = cgroup_parent(cgrp);
+
+	mutex_lock(&cgroup_mutex);
+	__cgroup_bpf_update(cgrp, parent, prog, type);
+	mutex_unlock(&cgroup_mutex);
+}
+#endif /* CONFIG_CGROUP_BPF */
+
 #ifdef CONFIG_CGROUP_DEBUG
 static struct cgroup_subsys_state *
 debug_css_alloc(struct cgroup_subsys_state *parent_css)