@@ -246,6 +246,42 @@ void proc_comm_connector(struct task_struct *task)
send_msg(msg);
}
+/*
+ * proc_ns_connector - report that @task's association with one namespace changed.
+ * @type: which namespace changed (CLONE_NEWNS, CLONE_NEWPID, ...)
+ * @reason: PROC_NM_REASON_CLONE or PROC_NM_REASON_SET
+ * @old_inum: inode number identifying the previous namespace
+ * @inum: inode number identifying the new namespace
+ */
+void proc_ns_connector(struct task_struct *task, int type, int reason, u64 old_inum, u64 inum)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ /* Cheap bail-out when no one is listening on the connector. */
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ ev->timestamp_ns = ktime_get_ns();
+ ev->what = PROC_EVENT_NM;
+ ev->event_data.nm.process_pid = task->pid;
+ ev->event_data.nm.process_tgid = task->tgid;
+ ev->event_data.nm.type = type;
+ ev->event_data.nm.reason = reason;
+ ev->event_data.nm.old_inum = old_inum;
+ ev->event_data.nm.inum = inum;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
+ send_msg(msg);
+}
+
void proc_coredump_connector(struct task_struct *task)
{
struct cn_msg *msg;
@@ -26,6 +26,7 @@ void proc_id_connector(struct task_struct *task, int which_id);
void proc_sid_connector(struct task_struct *task);
void proc_ptrace_connector(struct task_struct *task, int which_id);
void proc_comm_connector(struct task_struct *task);
+void proc_ns_connector(struct task_struct *task, int type, int reason, u64 old_inum, u64 inum);
void proc_coredump_connector(struct task_struct *task);
void proc_exit_connector(struct task_struct *task);
#else
@@ -45,6 +46,9 @@ static inline void proc_sid_connector(struct task_struct *task)
static inline void proc_comm_connector(struct task_struct *task)
{}
+static inline void proc_ns_connector(struct task_struct *task, int type, int reason, u64 old_inum, u64 inum)
+{}
+
static inline void proc_ptrace_connector(struct task_struct *task,
int ptrace_id)
{}
@@ -55,7 +55,8 @@ struct proc_event {
PROC_EVENT_SID = 0x00000080,
PROC_EVENT_PTRACE = 0x00000100,
PROC_EVENT_COMM = 0x00000200,
- /* "next" should be 0x00000400 */
+ PROC_EVENT_NM = 0x00000400,
+ /* "next" should be 0x00000800 */
/* "last" is the last process event: exit,
* while "next to last" is coredumping event */
PROC_EVENT_COREDUMP = 0x40000000,
@@ -112,6 +113,24 @@ struct proc_event {
char comm[16];
} comm;
+ /* PROC_EVENT_NM payload: one namespace association of the task changed. */
+ struct nm_proc_event {
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
+ __u32 type; /* CLONE_NEWNS, CLONE_NEWPID, ... */
+ /*
+ * NOTE(review): UAPI structs conventionally avoid enum-typed
+ * fields (enum width is compiler-dependent); consider __u32.
+ */
+ enum reason {
+ PROC_NM_REASON_CLONE = 0x00000001,
+ PROC_NM_REASON_SET = 0x00000002, /* setns or unshare */
+ PROC_NM_REASON_LAST = 0x80000000,
+ } reason;
+ __u64 old_inum;
+ __u64 inum;
+ } nm;
+
struct coredump_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
@@ -26,6 +26,7 @@
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/cgroup.h>
+#include <linux/cn_proc.h>
static struct kmem_cache *nsproxy_cachep;
@@ -139,6 +140,9 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
struct nsproxy *old_ns = tsk->nsproxy;
struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
struct nsproxy *new_ns;
+ struct ns_common *mntns;
+ /* mount-ns inode number observed before the nsproxy switch; 0 if unknown */
+ u64 old_mntns_inum = 0;
if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
CLONE_NEWPID | CLONE_NEWNET |
@@ -165,7 +168,50 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
if (IS_ERR(new_ns))
return PTR_ERR(new_ns);
+ /* Snapshot the old mount-ns inum before tsk->nsproxy is replaced. */
+ mntns = mntns_operations.get(tsk);
+ if (mntns) {
+ old_mntns_inum = mntns->inum;
+ mntns_operations.put(mntns);
+ }
+
tsk->nsproxy = new_ns;
+
+ /*
+ * Emit one PROC_EVENT_NM per namespace that actually changed.
+ * NOTE(review): in copy_process() the child's pid is assigned after
+ * copy_namespaces() runs, so tsk->pid/tgid reported here may still
+ * hold values inherited from the parent - confirm the ordering.
+ * NOTE(review): assumes the uts/ipc/net/cgroup/pid namespace pointers
+ * in both old_ns and new_ns are non-NULL - verify for all callers.
+ */
+ if (old_ns && new_ns) {
+ struct ns_common *mntns;
+ u64 new_mntns_inum = 0;
+ mntns = mntns_operations.get(tsk);
+ if (mntns) {
+ new_mntns_inum = mntns->inum;
+ mntns_operations.put(mntns);
+ }
+ if (old_ns->mnt_ns != new_ns->mnt_ns)
+ proc_ns_connector(tsk, CLONE_NEWNS, PROC_NM_REASON_CLONE, old_mntns_inum, new_mntns_inum);
+
+ if (old_ns->uts_ns != new_ns->uts_ns)
+ proc_ns_connector(tsk, CLONE_NEWUTS, PROC_NM_REASON_CLONE, old_ns->uts_ns->ns.inum, new_ns->uts_ns->ns.inum);
+
+ if (old_ns->ipc_ns != new_ns->ipc_ns)
+ proc_ns_connector(tsk, CLONE_NEWIPC, PROC_NM_REASON_CLONE, old_ns->ipc_ns->ns.inum, new_ns->ipc_ns->ns.inum);
+
+ if (old_ns->net_ns != new_ns->net_ns)
+ proc_ns_connector(tsk, CLONE_NEWNET, PROC_NM_REASON_CLONE, old_ns->net_ns->ns.inum, new_ns->net_ns->ns.inum);
+
+ if (old_ns->cgroup_ns != new_ns->cgroup_ns)
+ proc_ns_connector(tsk, CLONE_NEWCGROUP, PROC_NM_REASON_CLONE, old_ns->cgroup_ns->ns.inum, new_ns->cgroup_ns->ns.inum);
+
+ if (old_ns->pid_ns_for_children != new_ns->pid_ns_for_children)
+ proc_ns_connector(tsk, CLONE_NEWPID, PROC_NM_REASON_CLONE, old_ns->pid_ns_for_children->ns.inum, new_ns->pid_ns_for_children->ns.inum);
+ }
+
return 0;
}
@@ -216,14 +253,57 @@ out:
void switch_task_namespaces(struct task_struct *p, struct nsproxy *new)
{
struct nsproxy *ns;
+ struct ns_common *mntns;
+ /* mount-ns inode number observed before the nsproxy switch; 0 if unknown */
+ u64 old_mntns_inum = 0;
might_sleep();
+ /* Snapshot the old mount-ns inum while p->nsproxy is still the old one. */
+ mntns = mntns_operations.get(p);
+ if (mntns) {
+ old_mntns_inum = mntns->inum;
+ mntns_operations.put(mntns);
+ }
+
task_lock(p);
ns = p->nsproxy;
p->nsproxy = new;
task_unlock(p);
+ /*
+ * Report one PROC_EVENT_NM per namespace that changed. Dereferencing
+ * ns here is safe: its reference is not dropped until the
+ * atomic_dec_and_test() below. NOTE(review): these events are sent
+ * outside task_lock, so a concurrent setns() by another thread could
+ * interleave with the inum reads done via mntns_operations - verify.
+ */
+ if (ns && new) {
+ u64 new_mntns_inum = 0;
+ mntns = mntns_operations.get(p);
+ if (mntns) {
+ new_mntns_inum = mntns->inum;
+ mntns_operations.put(mntns);
+ }
+ if (ns->mnt_ns != new->mnt_ns)
+ proc_ns_connector(p, CLONE_NEWNS, PROC_NM_REASON_SET, old_mntns_inum, new_mntns_inum);
+
+ if (ns->uts_ns != new->uts_ns)
+ proc_ns_connector(p, CLONE_NEWUTS, PROC_NM_REASON_SET, ns->uts_ns->ns.inum, new->uts_ns->ns.inum);
+
+ if (ns->ipc_ns != new->ipc_ns)
+ proc_ns_connector(p, CLONE_NEWIPC, PROC_NM_REASON_SET, ns->ipc_ns->ns.inum, new->ipc_ns->ns.inum);
+
+ if (ns->net_ns != new->net_ns)
+ proc_ns_connector(p, CLONE_NEWNET, PROC_NM_REASON_SET, ns->net_ns->ns.inum, new->net_ns->ns.inum);
+
+ if (ns->cgroup_ns != new->cgroup_ns)
+ proc_ns_connector(p, CLONE_NEWCGROUP, PROC_NM_REASON_SET, ns->cgroup_ns->ns.inum, new->cgroup_ns->ns.inum);
+
+ if (ns->pid_ns_for_children != new->pid_ns_for_children)
+ proc_ns_connector(p, CLONE_NEWPID, PROC_NM_REASON_SET, ns->pid_ns_for_children->ns.inum, new->pid_ns_for_children->ns.inum);
+ }
+
if (ns && atomic_dec_and_test(&ns->count))
free_nsproxy(ns);
}