@@ -16,6 +16,7 @@
int init_new_context(struct task_struct *task, struct mm_struct *mm)
{
+ struct mm_context _from_mm;
struct mm_context *from_mm = NULL;
struct mm_context *to_mm = &mm->context;
unsigned long stack = 0;
@@ -25,15 +26,26 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
if (stack == 0)
goto out;
+ /*
+ * If the kernel wants a copy, it already copied the entire context.
+ * If not, it's all memset to 0.
+ * So we can detect here whether or not we should copy, and have the
+ * pid we should copy _from_ in our own context struct of the new mm.
+ */
+ if (to_mm->id.u.pid) {
+ _from_mm = *to_mm;
+ from_mm = &_from_mm;
+ }
+
+ memset(to_mm, 0, sizeof(*to_mm));
to_mm->id.stack = stack;
- if (current->mm != NULL && current->mm != &init_mm)
- from_mm = &current->mm->context;
block_signals_trace();
if (from_mm)
to_mm->id.u.pid = copy_context_skas0(stack,
from_mm->id.u.pid);
- else to_mm->id.u.pid = start_userspace(stack);
+ else
+ to_mm->id.u.pid = start_userspace(stack);
unblock_signals_trace();
if (to_mm->id.u.pid < 0) {