@@ -439,6 +439,7 @@
runtime/go-string-to-int-array.c \
runtime/go-strplus.c \
runtime/go-strslice.c \
+ runtime/go-traceback.c \
runtime/go-trampoline.c \
runtime/go-type-complex.c \
runtime/go-type-eface.c \
@@ -157,7 +157,6 @@
for (i = 0; runtime_sigtab[i].sig != -1; ++i)
{
- struct sigaction sa;
SigTab *t;
t = &runtime_sigtab[i];
@@ -177,21 +176,33 @@
runtime_startpanic ();
- /* We should do a stack backtrace here. Until we can do that,
- we reraise the signal in order to get a slightly better
- report from the shell. */
+ {
+ const char *name = NULL;

- memset (&sa, 0, sizeof sa);
+#ifdef HAVE_STRSIGNAL
+ name = strsignal (sig);
+#endif

- sa.sa_handler = SIG_DFL;
+ if (name == NULL)
+ runtime_printf ("Signal %d\n", sig);
+ else
+ runtime_printf ("%s\n", name);
+ }

- i = sigemptyset (&sa.sa_mask);
- __go_assert (i == 0);
+ runtime_printf ("\n");

- if (sigaction (sig, &sa, NULL) != 0)
- abort ();
+ if (runtime_gotraceback ())
+ {
+ G *g;

- raise (sig);
+ g = runtime_g ();
+ runtime_traceback (g);
+ runtime_tracebackothers (g);
+
+ /* The gc library calls runtime_dumpregs here, and provides
+ a function that prints the registers saved in context in
+ a readable form. */
+ }
runtime_exit (2);
}
@@ -230,12 +241,22 @@
sig_panic_info_handler (int sig, siginfo_t *info,
void *context __attribute__ ((unused)))
{
- if (runtime_g () == NULL || info->si_code == SI_USER)
+ G *g;
+
+ g = runtime_g ();
+ if (g == NULL || info->si_code == SI_USER)
{
sig_handler (sig);
return;
}
+ g->sig = sig;
+ g->sigcode0 = info->si_code;
+ g->sigcode1 = (uintptr_t) info->si_addr;
+
+ /* It would be nice to set g->sigpc here as the gc library does, but
+ I don't know how to get it portably. */
+
sig_panic_leadin (sig);
switch (sig)
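
The comment above notes that there is no portable way to recover the faulting PC for g->sigpc. For reference, here is a non-portable sketch of how it could be read from the third sigaction argument on GNU/Linux x86_64; this is not part of the patch, sketch_sigpc is a made-up name, and the REG_RIP/mcontext access is a glibc-specific assumption. The same ucontext access would also serve the runtime_dumpregs idea mentioned in the sig_handler hunk above.

#define _GNU_SOURCE        /* for REG_RIP in <sys/ucontext.h> on glibc */
#include <stdint.h>
#include <ucontext.h>

/* Hypothetical helper: recover the faulting PC from the ucontext passed
   to an SA_SIGINFO handler.  Only the GNU/Linux x86_64 case is filled in;
   other targets would need their own register index and mcontext layout.  */
static uintptr_t
sketch_sigpc (void *context)
{
#if defined(__linux__) && defined(__x86_64__)
  ucontext_t *uc = (ucontext_t *) context;

  return (uintptr_t) uc->uc_mcontext.gregs[REG_RIP];
#else
  (void) context;
  return 0;   /* No portable answer, which is why the patch leaves sigpc unset.  */
#endif
}
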
@@ -284,12 +305,19 @@
static void
sig_panic_handler (int sig)
{
- if (runtime_g () == NULL)
+ G *g;
+
+ g = runtime_g ();
+ if (g == NULL)
{
sig_handler (sig);
return;
}
+ g->sig = sig;
+ g->sigcode0 = 0;
+ g->sigcode1 = 0;
+
sig_panic_leadin (sig);
switch (sig)
@@ -0,0 +1,62 @@
+/* go-traceback.c -- stack backtrace for Go.
+
+ Copyright 2012 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file. */
+
+#include "config.h"
+
+#include "unwind.h"
+
+#include "runtime.h"
+#include "go-string.h"
+
+static _Unwind_Reason_Code
+traceback (struct _Unwind_Context *context, void *varg)
+{
+ int *parg = (int *) varg;
+ uintptr pc;
+ int ip_before_insn = 0;
+ struct __go_string fn;
+ struct __go_string file;
+ int line;
+
+#ifdef HAVE_GETIPINFO
+ pc = _Unwind_GetIPInfo (context, &ip_before_insn);
+#else
+ pc = _Unwind_GetIP (context);
+#endif
+
+ if (*parg > 100)
+ return _URC_END_OF_STACK;
+ ++*parg;
+
+ /* FIXME: If PC is in the __morestack routine, we should ignore
+ it. */
+
+ /* Back up to the call instruction. */
+ if (!ip_before_insn)
+ --pc;
+
+ if (!__go_file_line (pc, &fn, &file, &line))
+ return _URC_END_OF_STACK;
+
+ if (runtime_showframe (fn.__data))
+ {
+ runtime_printf ("%s\n", fn.__data);
+ runtime_printf ("\t%s:%d\n", file.__data, line);
+ }
+
+ return _URC_NO_REASON;
+}
+
+/* Print a stack trace for the current goroutine. */
+
+void
+runtime_traceback ()
+{
+ int c;
+
+ c = 0;
+ _Unwind_Backtrace (traceback, &c);
+}
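
For readers who have not used the libgcc unwinder directly, the following standalone sketch exercises the same _Unwind_Backtrace pattern as the new file, outside the Go runtime; it is illustration only and not part of the patch. HAVE_GETIPINFO is libgo's configure macro, so the sketch simply uses _Unwind_GetIP and backs up into the call instruction the way the code above does when ip_before_insn is not set.

#include <stdio.h>
#include <unwind.h>

/* Print the raw return addresses of the current call stack.  Most targets
   emit unwind tables for C by default; otherwise compile with -funwind-tables.  */
static _Unwind_Reason_Code
print_frame (struct _Unwind_Context *ctx, void *arg)
{
  int *count = (int *) arg;
  _Unwind_Ptr pc = _Unwind_GetIP (ctx);

  /* Step back so the address lands inside the call instruction rather
     than at the return address.  */
  printf ("frame %2d: %p\n", *count, (void *) (pc - 1));
  if (++*count > 100)   /* same arbitrary depth limit as the patch */
    return _URC_END_OF_STACK;
  return _URC_NO_REASON;
}

int
main (void)
{
  int count = 0;

  _Unwind_Backtrace (print_frame, &count);
  return 0;
}
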
@@ -361,10 +361,11 @@
g->writenbuf = b.__count;
USED(pc);
USED(sp);
- // runtime_goroutineheader(g);
- // runtime_traceback(pc, sp, 0, g);
- // if(all)
- // runtime_tracebackothers(g);
+ runtime_goroutineheader(g);
+ runtime_traceback();
+ runtime_goroutinetrailer(g);
+ if(all)
+ runtime_tracebackothers(g);
n = b.__count - g->writenbuf;
g->writebuf = nil;
g->writenbuf = 0;
@@ -50,6 +50,8 @@
static void schedule(G*);
+static void gtraceback(G*);
+
typedef struct Sched Sched;
M runtime_m0;
@@ -345,6 +347,9 @@
// the values for this thread.
mp = runtime_m();
gp = runtime_g();
+
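+ // If runtime_tracebackothers switched to this goroutine only to
+ // collect its stack trace, do that now and switch straight back
+ // to the requesting goroutine (see gtraceback below).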
+ if(gp->dotraceback != nil)
+ gtraceback(gp);
}
if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
@@ -523,19 +528,73 @@
}
void
-runtime_tracebackothers(G *me)
+runtime_goroutinetrailer(G *g)
{
- G *g;
+ if(g != nil && g->gopc != 0 && g->goid != 1) {
+ struct __go_string fn;
+ struct __go_string file;
+ int line;
+
+ if(__go_file_line(g->gopc - 1, &fn, &file, &line)) {
+ runtime_printf("created by %s\n", fn.__data);
+ runtime_printf("\t%s:%d\n", file.__data, line);
+ }
+ }
+}
+
+void
+runtime_tracebackothers(G * volatile me)
+{
+ G * volatile g;
for(g = runtime_allg; g != nil; g = g->alllink) {
if(g == me || g->status == Gdead)
continue;
runtime_printf("\n");
runtime_goroutineheader(g);
- // runtime_traceback(g->sched.pc, g->sched.sp, 0, g);
+
+ // Our only mechanism for doing a stack trace is
+ // _Unwind_Backtrace. And that only works for the
+ // current thread, not for other random goroutines.
+ // So we need to switch context to the goroutine, get
+ // the backtrace, and then switch back.
+
+ // This means that if it is running or in a syscall, we
+ // can't reliably print a stack trace. FIXME.
+ if(g->status == Gsyscall || g->status == Grunning) {
+ runtime_printf("no stack trace available\n");
+ runtime_goroutinetrailer(g);
+ continue;
+ }
+
+ g->dotraceback = me;
+
+#ifdef USING_SPLIT_STACK
+ __splitstack_getcontext(&me->stack_context[0]);
+#endif
+ getcontext(&me->context);
+
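+ // When the target goroutine finishes gtraceback it switches back
+ // to the context saved above; dotraceback is nil by then, so we
+ // fall through and move on to the next goroutine instead of
+ // switching again.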
+ if(g->dotraceback) {
+ runtime_gogo(g);
+ }
}
}
+// Do a stack trace of gp, and then restore the context to
+// gp->dotraceback.
+
+static void
+gtraceback(G* gp)
+{
+ G* ret;
+
+ runtime_traceback(nil);
+ runtime_goroutinetrailer(gp);
+ ret = gp->dotraceback;
+ gp->dotraceback = nil;
+ runtime_gogo(ret);
+}
+
// Mark this g as m's idle goroutine.
// This functionality might be used in environments where programs
// are limited to a single thread, to simulate a select-driven
@@ -1171,7 +1230,7 @@
// Leave SP around for gc and traceback.
#ifdef USING_SPLIT_STACK
- g->gcstack = __splitstack_find(NULL, NULL, &g->gcstack_size,
+ g->gcstack = __splitstack_find(nil, nil, &g->gcstack_size,
&g->gcnext_segment, &g->gcnext_sp,
&g->gcinitial_sp);
#else
@@ -1227,9 +1286,11 @@
// find that we still have mcpu <= mcpumax, then we can
// start executing Go code immediately, without having to
// schedlock/schedunlock.
+ // Also do fast return if any locks are held, so that
+ // panic code can use syscalls to open a file.
gp = g;
v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift));
- if(m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) {
+ if((m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) || m->locks > 0) {
// There's a cpu for us, so we can run.
gp->status = Grunning;
// Garbage collector isn't running (since we are),
@@ -1561,7 +1622,7 @@
uint8 *lr __attribute__ ((unused)),
G *gp __attribute__ ((unused)))
{
- // int32 n;
+ int32 n;
if(prof.fn == nil || prof.hz == 0)
return;
@@ -1571,9 +1632,9 @@
runtime_unlock(&prof);
return;
}
- // n = runtime_gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf));
- // if(n > 0)
- // prof.fn(prof.pcbuf, n);
+ n = runtime_callers(0, prof.pcbuf, nelem(prof.pcbuf));
+ if(n > 0)
+ prof.fn(prof.pcbuf, n);
runtime_unlock(&prof);
}
@@ -11,6 +11,17 @@
uint32 runtime_panicking;
+int32
+runtime_gotraceback(void)
+{
+ const byte *p;
+
+ p = runtime_getenv("GOTRACEBACK");
+ if(p == nil || p[0] == '\0')
+ return 1; // default is on
+ return runtime_atoi(p);
+}
+
static Lock paniclk;
void
@@ -31,20 +42,26 @@
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
- /*
+ G* g;
static bool didothers;
+ g = runtime_g();
if(g->sig != 0)
- runtime_printf("[signal %x code=%p addr=%p pc=%p]\n",
- g->sig, g->sigcode0, g->sigcode1, g->sigpc);
+ runtime_printf("[signal %x code=%p addr=%p]\n",
+ g->sig, (void*)(g->sigcode0), (void*)(g->sigcode1));
if(runtime_gotraceback()){
+ if(g != runtime_m()->g0) {
+ runtime_printf("\n");
+ runtime_goroutineheader(g);
+ runtime_traceback();
+ runtime_goroutinetrailer(g);
+ }
if(!didothers) {
didothers = true;
runtime_tracebackothers(g);
}
}
- */
runtime_unlock(&paniclk);
if(runtime_xadd(&runtime_panicking, -1) != 0) {
@@ -185,10 +202,10 @@
}
static struct root_list runtime_roots =
-{ NULL,
+{ nil,
{ { &syscall_Envs, sizeof syscall_Envs },
{ &os_Args, sizeof os_Args },
- { NULL, 0 } },
+ { nil, 0 } },
};
void
@@ -209,3 +226,13 @@
return 0;
#endif
}
+
+bool
+runtime_showframe(const unsigned char *s)
+{
+ static int32 traceback = -1;
+
+ if(traceback < 0)
+ traceback = runtime_gotraceback();
+ return traceback > 1 || (__builtin_strchr((const char*)s, '.') != nil && __builtin_memcmp(s, "runtime.", 7) != 0);
+}
@@ -143,14 +143,16 @@
M* m; // for debuggers, but offset not hard-coded
M* lockedm;
M* idlem;
- // int32 sig;
+ int32 sig;
int32 writenbuf;
byte* writebuf;
- // uintptr sigcode0;
- // uintptr sigcode1;
+ uintptr sigcode0;
+ uintptr sigcode1;
// uintptr sigpc;
uintptr gopc; // pc of go statement that created this goroutine
+ G* dotraceback;
+
ucontext_t context;
void* stack_context[10];
};
@@ -289,6 +291,11 @@
void runtime_schedinit(void);
void runtime_initsig(void);
void runtime_sigenable(uint32 sig);
+int32 runtime_gotraceback(void);
+void runtime_goroutineheader(G*);
+void runtime_goroutinetrailer(G*);
+void runtime_traceback();
+void runtime_tracebackothers(G*);
String runtime_gostringnocopy(const byte*);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
@@ -434,6 +441,8 @@
void runtime_LockOSThread(void) __asm__("runtime.LockOSThread");
void runtime_UnlockOSThread(void) __asm__("runtime.UnlockOSThread");
+bool runtime_showframe(const unsigned char*);
+
uintptr runtime_memlimit(void);
// If appropriate, ask the operating system to control whether this
@@ -468,3 +477,5 @@
// This will be 0 when using split stacks, as in that case
// the stacks are allocated by the splitstack library.
extern uintptr runtime_stacks_sys;
+
+extern _Bool __go_file_line (uintptr, String*, String*, int *);