diff --git a/Documentation/backend_test_health.md b/Documentation/backend_test_health.md
index d8b08ee5ce..4fbde47c9b 100644
--- a/Documentation/backend_test_health.md
+++ b/Documentation/backend_test_health.md
@@ -1,14 +1,12 @@
 Tests skipped by each supported backend:
 
-* 386 skipped = 7
-	* 1 broken
+* 386 skipped = 6
 	* 3 broken - cgo stacktraces
 	* 3 not implemented
-* arm64 skipped = 2
-	* 1 broken
+* arm64 skipped = 1
 	* 1 broken - global variable symbolication
-* darwin/arm64 skipped = 1
-	* 1 broken - cgo stacktraces
+* darwin/arm64 skipped = 2
+	* 2 broken - cgo stacktraces
 * darwin/lldb skipped = 1
 	* 1 upstream issue
 * freebsd skipped = 4
@@ -17,11 +15,9 @@ Tests skipped by each supported backend:
 	* 1 broken
 * pie skipped = 2
 	* 2 upstream issue - https://github.com/golang/go/issues/29322
-* windows skipped = 5
+* windows skipped = 4
 	* 1 broken
 	* 3 see https://github.com/go-delve/delve/issues/2768
-	* 1 upstream issue
-* windows/arm64 skipped = 5
+* windows/arm64 skipped = 4
 	* 3 broken
-	* 1 broken - cgo stacktraces
 	* 1 broken - step concurrent
diff --git a/pkg/proc/amd64_arch.go b/pkg/proc/amd64_arch.go
index 2c6b66f300..dc426ac3d8 100644
--- a/pkg/proc/amd64_arch.go
+++ b/pkg/proc/amd64_arch.go
@@ -225,25 +225,16 @@ func amd64SwitchStack(it *stackIterator, _ *op.DwarfRegisters) bool {
 		it.switchToGoroutineStack()
 		return true
 
-	default:
-		if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
-			// The runtime switches to the system stack in multiple places.
-			// This usually happens through a call to runtime.systemstack but there
-			// are functions that switch to the system stack manually (for example
-			// runtime.morestack).
-			// Since we are only interested in printing the system stack for cgo
-			// calls we switch directly to the goroutine stack if we detect that the
-			// function at the top of the stack is a runtime function.
-			//
-			// The function "runtime.throw" is deliberately excluded from this
-			// because it can end up in the stack during a cgo call and switching to
-			// the goroutine stack will exclude all the C functions from the stack
-			// trace.
+	case "runtime.newstack", "runtime.systemstack":
+		if it.systemstack && it.g != nil {
 			it.switchToGoroutineStack()
 			return true
 		}
 		return false
+
+	default:
+		return false
 	}
 }
diff --git a/pkg/proc/arm64_arch.go b/pkg/proc/arm64_arch.go
index 6de8c0bfa5..426fbbce4d 100644
--- a/pkg/proc/arm64_arch.go
+++ b/pkg/proc/arm64_arch.go
@@ -5,7 +5,6 @@ import (
 	"encoding/binary"
 	"fmt"
 	"runtime"
-	"strings"
 
 	"github.com/go-delve/delve/pkg/dwarf/frame"
 	"github.com/go-delve/delve/pkg/dwarf/op"
@@ -269,15 +268,9 @@ func arm64SwitchStack(it *stackIterator, callFrameRegs *op.DwarfRegisters) bool
 			it.switchToGoroutineStack()
 			return true
 		}
-	default:
-		if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" {
-			// The runtime switches to the system stack in multiple places.
-			// This usually happens through a call to runtime.systemstack but there
-			// are functions that switch to the system stack manually (for example
-			// runtime.morestack).
- // Since we are only interested in printing the system stack for cgo - // calls we switch directly to the goroutine stack if we detect that the - // function at the top of the stack is a runtime function. + + case "runtime.newstack", "runtime.systemstack": + if it.systemstack && it.g != nil { it.switchToGoroutineStack() return true } diff --git a/pkg/proc/breakpoints.go b/pkg/proc/breakpoints.go index 2e7cdf725c..a9dc06d80a 100644 --- a/pkg/proc/breakpoints.go +++ b/pkg/proc/breakpoints.go @@ -285,7 +285,7 @@ func (bpstate *BreakpointState) checkCond(tgt *Target, breaklet *Breaklet, threa nextDeferOk := true if breaklet.Kind&NextDeferBreakpoint != 0 { var err error - frames, err := ThreadStacktrace(thread, 2) + frames, err := ThreadStacktrace(tgt, thread, 2) if err == nil { nextDeferOk, _ = isPanicCall(frames) if !nextDeferOk { @@ -297,7 +297,7 @@ func (bpstate *BreakpointState) checkCond(tgt *Target, breaklet *Breaklet, threa case WatchOutOfScopeBreakpoint: if breaklet.checkPanicCall { - frames, err := ThreadStacktrace(thread, 2) + frames, err := ThreadStacktrace(tgt, thread, 2) if err == nil { ipc, _ := isPanicCall(frames) active = active && ipc diff --git a/pkg/proc/core/core_test.go b/pkg/proc/core/core_test.go index fa86493466..2744bf442e 100644 --- a/pkg/proc/core/core_test.go +++ b/pkg/proc/core/core_test.go @@ -245,8 +245,8 @@ func logRegisters(t *testing.T, regs proc.Registers, arch *proc.Arch) { } func TestCore(t *testing.T) { - if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { - return + if runtime.GOOS != "linux" || runtime.GOARCH == "386" { + t.Skip("unsupported") } if runtime.GOOS == "linux" && os.Getenv("CI") == "true" && buildMode == "pie" { t.Skip("disabled on linux, Github Actions, with PIE buildmode") @@ -268,7 +268,7 @@ func TestCore(t *testing.T) { var panickingStack []proc.Stackframe for _, g := range gs { t.Logf("Goroutine %d", g.ID) - stack, err := g.Stacktrace(10, 0) + stack, err := proc.GoroutineStacktrace(p, g, 10, 0) if err != nil { t.Errorf("Stacktrace() on goroutine %v = %v", g, err) } @@ -315,8 +315,11 @@ func TestCore(t *testing.T) { } func TestCoreFpRegisters(t *testing.T) { - if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { - return + if runtime.GOOS != "linux" || runtime.GOARCH == "386" { + t.Skip("unsupported") + } + if runtime.GOARCH != "amd64" { + t.Skip("test requires amd64") } // in go1.10 the crash is executed on a different thread and registers are // no longer available in the core dump. 
@@ -334,7 +337,7 @@ func TestCoreFpRegisters(t *testing.T) { var regs proc.Registers for _, thread := range p.ThreadList() { - frames, err := proc.ThreadStacktrace(thread, 10) + frames, err := proc.ThreadStacktrace(p, thread, 10) if err != nil { t.Errorf("ThreadStacktrace for %x = %v", thread.ThreadID(), err) continue @@ -402,8 +405,8 @@ func TestCoreFpRegisters(t *testing.T) { } func TestCoreWithEmptyString(t *testing.T) { - if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { - return + if runtime.GOOS != "linux" || runtime.GOARCH == "386" { + t.Skip("unsupported") } if runtime.GOOS == "linux" && os.Getenv("CI") == "true" && buildMode == "pie" { t.Skip("disabled on linux, Github Actions, with PIE buildmode") @@ -417,7 +420,7 @@ func TestCoreWithEmptyString(t *testing.T) { var mainFrame *proc.Stackframe mainSearch: for _, g := range gs { - stack, err := g.Stacktrace(10, 0) + stack, err := proc.GoroutineStacktrace(p, g, 10, 0) assertNoError(err, t, "Stacktrace()") for _, frame := range stack { if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.main" { @@ -466,7 +469,7 @@ func TestMinidump(t *testing.T) { t.Logf("%d goroutines", len(gs)) foundMain, foundTime := false, false for _, g := range gs { - stack, err := g.Stacktrace(10, 0) + stack, err := proc.GoroutineStacktrace(p, g, 10, 0) if err != nil { t.Errorf("Stacktrace() on goroutine %v = %v", g, err) } diff --git a/pkg/proc/eval.go b/pkg/proc/eval.go index 83ff7d3dc9..b3b6db6824 100644 --- a/pkg/proc/eval.go +++ b/pkg/proc/eval.go @@ -90,9 +90,9 @@ func ConvertEvalScope(dbp *Target, gid int64, frame, deferCall int) (*EvalScope, var locs []Stackframe if g != nil { - locs, err = g.Stacktrace(frame+1, opts) + locs, err = GoroutineStacktrace(dbp, g, frame+1, opts) } else { - locs, err = ThreadStacktrace(ct, frame+1) + locs, err = ThreadStacktrace(dbp, ct, frame+1) } if err != nil { return nil, err @@ -145,7 +145,7 @@ func FrameToScope(t *Target, thread MemoryReadWriter, g *G, frames ...Stackframe // ThreadScope returns an EvalScope for the given thread. func ThreadScope(t *Target, thread Thread) (*EvalScope, error) { - locations, err := ThreadStacktrace(thread, 1) + locations, err := ThreadStacktrace(t, thread, 1) if err != nil { return nil, err } @@ -157,7 +157,7 @@ func ThreadScope(t *Target, thread Thread) (*EvalScope, error) { // GoroutineScope returns an EvalScope for the goroutine running on the given thread. func GoroutineScope(t *Target, thread Thread) (*EvalScope, error) { - locations, err := ThreadStacktrace(thread, 1) + locations, err := ThreadStacktrace(t, thread, 1) if err != nil { return nil, err } diff --git a/pkg/proc/i386_arch.go b/pkg/proc/i386_arch.go index aaafad0ecf..2bb53cc381 100644 --- a/pkg/proc/i386_arch.go +++ b/pkg/proc/i386_arch.go @@ -152,25 +152,16 @@ func i386SwitchStack(it *stackIterator, _ *op.DwarfRegisters) bool { it.switchToGoroutineStack() return true - default: - if it.systemstack && it.top && it.g != nil && strings.HasPrefix(it.frame.Current.Fn.Name, "runtime.") && it.frame.Current.Fn.Name != "runtime.throw" && it.frame.Current.Fn.Name != "runtime.fatalthrow" { - // The runtime switches to the system stack in multiple places. - // This usually happens through a call to runtime.systemstack but there - // are functions that switch to the system stack manually (for example - // runtime.morestack). 
- // Since we are only interested in printing the system stack for cgo - // calls we switch directly to the goroutine stack if we detect that the - // function at the top of the stack is a runtime function. - // - // The function "runtime.throw" is deliberately excluded from this - // because it can end up in the stack during a cgo call and switching to - // the goroutine stack will exclude all the C functions from the stack - // trace. + case "runtime.newstack", "runtime.systemstack": + if it.systemstack && it.g != nil { it.switchToGoroutineStack() return true } return false + + default: + return false } } diff --git a/pkg/proc/proc_test.go b/pkg/proc/proc_test.go index 4a145591c2..dffd1e8fb3 100644 --- a/pkg/proc/proc_test.go +++ b/pkg/proc/proc_test.go @@ -778,8 +778,8 @@ func TestRuntimeBreakpoint(t *testing.T) { }) } -func returnAddress(thread proc.Thread) (uint64, error) { - locations, err := proc.ThreadStacktrace(thread, 2) +func returnAddress(tgt *proc.Target, thread proc.Thread) (uint64, error) { + locations, err := proc.ThreadStacktrace(tgt, thread, 2) if err != nil { return 0, err } @@ -797,7 +797,7 @@ func TestFindReturnAddress(t *testing.T) { if err != nil { t.Fatal(err) } - addr, err := returnAddress(p.CurrentThread()) + addr, err := returnAddress(p, p.CurrentThread()) if err != nil { t.Fatal(err) } @@ -816,7 +816,7 @@ func TestFindReturnAddressTopOfStackFn(t *testing.T) { if err := grp.Continue(); err != nil { t.Fatal(err) } - if _, err := returnAddress(p.CurrentThread()); err == nil { + if _, err := returnAddress(p, p.CurrentThread()); err == nil { t.Fatal("expected error to be returned") } }) @@ -913,7 +913,7 @@ func TestStacktrace(t *testing.T) { for i := range stacks { assertNoError(grp.Continue(), t, "Continue()") - locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40) + locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if len(locations) != len(stacks[i])+2 { @@ -941,7 +941,7 @@ func TestStacktrace2(t *testing.T) { withTestProcess("retstack", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) { assertNoError(grp.Continue(), t, "Continue()") - locations, err := proc.ThreadStacktrace(p.CurrentThread(), 40) + locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if !stackMatch([]loc{{-1, "main.f"}, {16, "main.main"}}, locations, false) { for i := range locations { @@ -951,7 +951,7 @@ func TestStacktrace2(t *testing.T) { } assertNoError(grp.Continue(), t, "Continue()") - locations, err = proc.ThreadStacktrace(p.CurrentThread(), 40) + locations, err = proc.ThreadStacktrace(p, p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") if !stackMatch([]loc{{-1, "main.g"}, {17, "main.main"}}, locations, false) { for i := range locations { @@ -1016,7 +1016,7 @@ func TestStacktraceGoroutine(t *testing.T) { mainCount := 0 for i, g := range gs { - locations, err := g.Stacktrace(40, 0) + locations, err := proc.GoroutineStacktrace(p, g, 40, 0) if err != nil { // On windows we do not have frame information for goroutines doing system calls. 
t.Logf("Could not retrieve goroutine stack for goid=%d: %v", g.ID, err) @@ -1174,7 +1174,7 @@ func TestIssue239(t *testing.T) { } func findFirstNonRuntimeFrame(p *proc.Target) (proc.Stackframe, error) { - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 10) if err != nil { return proc.Stackframe{}, err } @@ -1328,7 +1328,7 @@ func TestFrameEvaluation(t *testing.T) { found := make([]bool, 10) for _, g := range gs { frame := -1 - frames, err := g.Stacktrace(40, 0) + frames, err := proc.GoroutineStacktrace(p, g, 40, 0) if err != nil { t.Logf("could not stacktrace goroutine %d: %v\n", g.ID, err) continue @@ -1375,7 +1375,7 @@ func TestFrameEvaluation(t *testing.T) { g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG()") - frames, err := g.Stacktrace(40, 0) + frames, err := proc.GoroutineStacktrace(p, g, 40, 0) assertNoError(err, t, "Stacktrace()") t.Logf("Goroutine %d %#v", g.ID, g.Thread) logStacktrace(t, p, frames) @@ -1916,7 +1916,7 @@ func TestIssue332_Part1(t *testing.T) { setFileBreakpoint(p, t, fixture.Source, 8) assertNoError(grp.Continue(), t, "Continue()") assertNoError(grp.Next(), t, "first Next()") - locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2) + locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 2) assertNoError(err, t, "Stacktrace()") if locations[0].Call.Fn == nil { t.Fatalf("Not on a function") @@ -1943,7 +1943,7 @@ func TestIssue332_Part2(t *testing.T) { // step until we enter changeMe for { assertNoError(grp.Step(), t, "Step()") - locations, err := proc.ThreadStacktrace(p.CurrentThread(), 2) + locations, err := proc.ThreadStacktrace(p, p.CurrentThread(), 2) assertNoError(err, t, "Stacktrace()") if locations[0].Call.Fn == nil { t.Fatalf("Not on a function") @@ -2117,7 +2117,7 @@ func TestIssue462(t *testing.T) { }() assertNoError(grp.Continue(), t, "Continue()") - _, err := proc.ThreadStacktrace(p.CurrentThread(), 40) + _, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40) assertNoError(err, t, "Stacktrace()") }) } @@ -2148,7 +2148,7 @@ func TestNextParked(t *testing.T) { if g.Thread != nil { continue } - frames, _ := g.Stacktrace(5, 0) + frames, _ := proc.GoroutineStacktrace(p, g, 5, 0) for _, frame := range frames { // line 11 is the line where wg.Done is called if frame.Current.Fn != nil && frame.Current.Fn.Name == "main.sayhi" && frame.Current.Line < 11 { @@ -2200,7 +2200,7 @@ func TestStepParked(t *testing.T) { } t.Logf("Parked g is: %v\n", parkedg) - frames, _ := parkedg.Stacktrace(20, 0) + frames, _ := proc.GoroutineStacktrace(p, parkedg, 20, 0) for _, frame := range frames { name := "" if frame.Call.Fn != nil { @@ -2464,7 +2464,7 @@ func TestStepConcurrentDirect(t *testing.T) { // loop exited break } - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 20) if err != nil { t.Errorf("Could not get stacktrace of goroutine %d\n", p.SelectedGoroutine().ID) } else { @@ -3357,9 +3357,10 @@ func TestCgoStacktrace(t *testing.T) { t.Skip("disabled on macOS with go before version 1.8") } } - skipOn(t, "broken - cgo stacktraces", "386") - skipOn(t, "broken - cgo stacktraces", "windows", "arm64") + if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 21) { + skipOn(t, "broken - cgo stacktraces", "windows", "arm64") + } protest.MustHaveCgo(t) // Tests that: @@ -3401,7 +3402,7 @@ func TestCgoStacktrace(t *testing.T) { } } - frames, err := g.Stacktrace(100, 0) + frames, err := 
proc.GoroutineStacktrace(p, g, 100, 0) assertNoError(err, t, fmt.Sprintf("Stacktrace at iteration step %d", itidx)) logStacktrace(t, p, frames) @@ -3431,7 +3432,7 @@ func TestCgoStacktrace(t *testing.T) { } // also check that ThreadStacktrace produces the same list of frames - threadFrames, err := proc.ThreadStacktrace(p.CurrentThread(), 100) + threadFrames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 100) assertNoError(err, t, fmt.Sprintf("ThreadStacktrace at iteration step %d", itidx)) if len(threadFrames) != len(frames) { @@ -3493,7 +3494,7 @@ func TestSystemstackStacktrace(t *testing.T) { assertNoError(grp.Continue(), t, "second continue") g, err := proc.GetG(p.CurrentThread()) assertNoError(err, t, "GetG") - frames, err := g.Stacktrace(100, 0) + frames, err := proc.GoroutineStacktrace(p, g, 100, 0) assertNoError(err, t, "stacktrace") logStacktrace(t, p, frames) m := stacktraceCheck(t, []string{"!runtime.startpanic_m", "runtime.gopanic", "main.main"}, frames) @@ -3526,7 +3527,7 @@ func TestSystemstackOnRuntimeNewstack(t *testing.T) { break } } - frames, err := g.Stacktrace(100, 0) + frames, err := proc.GoroutineStacktrace(p, g, 100, 0) assertNoError(err, t, "stacktrace") logStacktrace(t, p, frames) m := stacktraceCheck(t, []string{"!runtime.newstack", "main.main"}, frames) @@ -3545,7 +3546,7 @@ func TestIssue1034(t *testing.T) { withTestProcess("cgostacktest/", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) { setFunctionBreakpoint(p, t, "main.main") assertNoError(grp.Continue(), t, "Continue()") - frames, err := p.SelectedGoroutine().Stacktrace(10, 0) + frames, err := proc.GoroutineStacktrace(p, p.SelectedGoroutine(), 10, 0) assertNoError(err, t, "Stacktrace") scope := proc.FrameToScope(p, p.Memory(), nil, frames[2:]...) 
args, _ := scope.FunctionArguments(normalLoadConfig) @@ -3862,7 +3863,7 @@ func TestInlinedStacktraceAndVariables(t *testing.T) { // first inlined call assertNoError(grp.Continue(), t, "Continue") - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 20) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 20) assertNoError(err, t, "ThreadStacktrace") t.Logf("Stacktrace:\n") for i := range frames { @@ -3889,7 +3890,7 @@ func TestInlinedStacktraceAndVariables(t *testing.T) { // second inlined call assertNoError(grp.Continue(), t, "Continue") - frames, err = proc.ThreadStacktrace(p.CurrentThread(), 20) + frames, err = proc.ThreadStacktrace(p, p.CurrentThread(), 20) assertNoError(err, t, "ThreadStacktrace (2)") t.Logf("Stacktrace 2:\n") for i := range frames { @@ -4204,7 +4205,7 @@ func TestIssue1264(t *testing.T) { func TestReadDefer(t *testing.T) { withTestProcess("deferstack", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) { assertNoError(grp.Continue(), t, "Continue") - frames, err := p.SelectedGoroutine().Stacktrace(10, proc.StacktraceReadDefers) + frames, err := proc.GoroutineStacktrace(p, p.SelectedGoroutine(), 10, proc.StacktraceReadDefers) assertNoError(err, t, "Stacktrace") logStacktrace(t, p, frames) @@ -4415,7 +4416,7 @@ func TestIssue1469(t *testing.T) { t.Logf("too many threads running goroutine %d", gid) for _, thread := range gid2thread[gid] { t.Logf("\tThread %d", thread.ThreadID()) - frames, err := proc.ThreadStacktrace(thread, 20) + frames, err := proc.ThreadStacktrace(p, thread, 20) if err != nil { t.Logf("\t\tcould not get stacktrace %v", err) } @@ -4646,9 +4647,12 @@ func TestIssue1615(t *testing.T) { } func TestCgoStacktrace2(t *testing.T) { - skipOn(t, "upstream issue", "windows") - skipOn(t, "broken", "386") - skipOn(t, "broken", "arm64") + if !goversion.VersionAfterOrEqual(runtime.Version(), 1, 21) { + skipOn(t, "upstream issue", "windows") + skipOn(t, "broken", "arm64") + skipOn(t, "broken", "386") + } + skipOn(t, "broken - cgo stacktraces", "darwin", "arm64") protest.MustHaveCgo(t) // If a panic happens during cgo execution the stacktrace should show the C // function that caused the problem. 
@@ -4657,7 +4661,7 @@ func TestCgoStacktrace2(t *testing.T) { if _, exited := err.(proc.ErrProcessExited); exited { t.Fatal("process exited") } - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 100) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 100) assertNoError(err, t, "Stacktrace()") logStacktrace(t, p, frames) m := stacktraceCheck(t, []string{"C.sigsegv", "C.testfn", "main.main"}, frames) @@ -4768,7 +4772,7 @@ func TestIssue1795(t *testing.T) { assertNoError(grp.Continue(), t, "Continue()") assertLineNumber(p, t, 12, "wrong line number after Continue (1),") assertNoError(grp.Continue(), t, "Continue()") - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 40) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 40) assertNoError(err, t, "ThreadStacktrace()") logStacktrace(t, p, frames) if err := checkFrame(frames[0], "regexp.(*Regexp).doExecute", "", 0, false); err != nil { @@ -5109,7 +5113,7 @@ func TestStepOutPreservesGoroutine(t *testing.T) { bestg := []*proc.G{} for _, g := range gs { t.Logf("stacktracing goroutine %d (%v)\n", g.ID, g.CurrentLoc) - frames, err := g.Stacktrace(20, 0) + frames, err := proc.GoroutineStacktrace(p, g, 20, 0) assertNoError(err, t, "Stacktrace") for _, frame := range frames { if frame.Call.Fn != nil && frame.Call.Fn.Name == "main.coroutine" { @@ -5280,9 +5284,9 @@ func TestDump(t *testing.T) { t.Errorf("Goroutine mismatch\nlive:\t%s\ncore:\t%s", convertGoroutine(gos[i]), convertGoroutine(cgos[i])) } - frames, err := gos[i].Stacktrace(20, 0) + frames, err := proc.GoroutineStacktrace(p, gos[i], 20, 0) assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - live process", gos[i].ID)) - cframes, err := cgos[i].Stacktrace(20, 0) + cframes, err := proc.GoroutineStacktrace(c, cgos[i], 20, 0) assertNoError(err, t, fmt.Sprintf("Stacktrace for goroutine %d - core dump", gos[i].ID)) if len(frames) != len(cframes) { @@ -5955,7 +5959,7 @@ func TestStacktraceExtlinkMac(t *testing.T) { withTestProcess("issue3194", t, func(p *proc.Target, grp *proc.TargetGroup, fixture protest.Fixture) { setFunctionBreakpoint(p, t, "main.main") assertNoError(grp.Continue(), t, "First Continue()") - frames, err := proc.ThreadStacktrace(p.CurrentThread(), 10) + frames, err := proc.ThreadStacktrace(p, p.CurrentThread(), 10) assertNoError(err, t, "ThreadStacktrace") logStacktrace(t, p, frames) if len(frames) < 2 || frames[0].Call.Fn.Name != "main.main" || frames[1].Call.Fn.Name != "runtime.main" { diff --git a/pkg/proc/stack.go b/pkg/proc/stack.go index 567a42103d..31e36df785 100644 --- a/pkg/proc/stack.go +++ b/pkg/proc/stack.go @@ -93,7 +93,7 @@ func (frame *Stackframe) FramePointerOffset() int64 { // ThreadStacktrace returns the stack trace for thread. // Note the locations in the array are return addresses not call addresses. 
-func ThreadStacktrace(thread Thread, depth int) ([]Stackframe, error) { +func ThreadStacktrace(tgt *Target, thread Thread, depth int) ([]Stackframe, error) { g, _ := GetG(thread) if g == nil { regs, err := thread.Registers() @@ -103,13 +103,13 @@ func ThreadStacktrace(thread Thread, depth int) ([]Stackframe, error) { so := thread.BinInfo().PCToImage(regs.PC()) dwarfRegs := *(thread.BinInfo().Arch.RegistersToDwarfRegisters(so.StaticBase, regs)) dwarfRegs.ChangeFunc = thread.SetReg - it := newStackIterator(thread.BinInfo(), thread.ProcessMemory(), dwarfRegs, 0, nil, 0) + it := newStackIterator(tgt, thread.BinInfo(), thread.ProcessMemory(), dwarfRegs, 0, nil, 0) return it.stacktrace(depth) } - return g.Stacktrace(depth, 0) + return GoroutineStacktrace(tgt, g, depth, 0) } -func (g *G) stackIterator(opts StacktraceOptions) (*stackIterator, error) { +func goroutineStackIterator(tgt *Target, g *G, opts StacktraceOptions) (*stackIterator, error) { bi := g.variable.bi if g.Thread != nil { regs, err := g.Thread.Registers() @@ -120,13 +120,13 @@ func (g *G) stackIterator(opts StacktraceOptions) (*stackIterator, error) { dwarfRegs := *(bi.Arch.RegistersToDwarfRegisters(so.StaticBase, regs)) dwarfRegs.ChangeFunc = g.Thread.SetReg return newStackIterator( - bi, g.variable.mem, + tgt, bi, g.variable.mem, dwarfRegs, g.stack.hi, g, opts), nil } so := g.variable.bi.PCToImage(g.PC) return newStackIterator( - bi, g.variable.mem, + tgt, bi, g.variable.mem, bi.Arch.addrAndStackRegsToDwarfRegisters(so.StaticBase, g.PC, g.SP, g.BP, g.LR), g.stack.hi, g, opts), nil } @@ -147,10 +147,10 @@ const ( StacktraceG ) -// Stacktrace returns the stack trace for a goroutine. +// GoroutineStacktrace returns the stack trace for a goroutine. // Note the locations in the array are return addresses not call addresses. -func (g *G) Stacktrace(depth int, opts StacktraceOptions) ([]Stackframe, error) { - it, err := g.stackIterator(opts) +func GoroutineStacktrace(tgt *Target, g *G, depth int, opts StacktraceOptions) ([]Stackframe, error) { + it, err := goroutineStackIterator(tgt, g, opts) if err != nil { return nil, err } @@ -175,13 +175,14 @@ func (n NullAddrError) Error() string { // required to iterate and walk the program // stack. type stackIterator struct { - pc uint64 - top bool - atend bool - frame Stackframe - bi *BinaryInfo - mem MemoryReadWriter - err error + pc uint64 + top bool + atend bool + frame Stackframe + target *Target + bi *BinaryInfo + mem MemoryReadWriter + err error stackhi uint64 systemstack bool @@ -196,12 +197,12 @@ type stackIterator struct { opts StacktraceOptions } -func newStackIterator(bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, g *G, opts StacktraceOptions) *stackIterator { +func newStackIterator(tgt *Target, bi *BinaryInfo, mem MemoryReadWriter, regs op.DwarfRegisters, stackhi uint64, g *G, opts StacktraceOptions) *stackIterator { systemstack := true if g != nil { systemstack = g.SystemStack } - return &stackIterator{pc: regs.PC(), regs: regs, top: true, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, systemstack: systemstack, g: g, opts: opts} + return &stackIterator{pc: regs.PC(), regs: regs, top: true, target: tgt, bi: bi, mem: mem, err: nil, atend: false, stackhi: stackhi, systemstack: systemstack, g: g, opts: opts} } // Next points the iterator to the next stack frame. 
@@ -237,6 +238,24 @@ func (it *stackIterator) Next() bool { logger.Debugf("new frame %#x %s:%d at %s", it.frame.Call.PC, it.frame.Call.File, it.frame.Call.Line, fnname) } + if it.frame.Current.Fn != nil && it.frame.Current.Fn.Name == "runtime.sigtrampgo" && it.target != nil { + regs, err := it.readSigtrampgoContext() + if err != nil { + logflags.DebuggerLogger().Errorf("could not read runtime.sigtrampgo context: %v", err) + } else { + so := it.bi.PCToImage(regs.PC()) + regs.StaticBase = so.StaticBase + it.pc = regs.PC() + it.regs = *regs + it.top = false + if it.g != nil && it.g.ID != 0 { + it.systemstack = !(uint64(it.regs.SP()) >= it.g.stack.lo && uint64(it.regs.SP()) < it.g.stack.hi) + } + logflags.StackLogger().Debugf("sigtramp context read") + return true + } + } + if it.opts&StacktraceSimple == 0 { if it.bi.Arch.switchStack(it, &callFrameRegs) { logflags.StackLogger().Debugf("stack switched") diff --git a/pkg/proc/stack_sigtramp.go b/pkg/proc/stack_sigtramp.go new file mode 100644 index 0000000000..da7426747f --- /dev/null +++ b/pkg/proc/stack_sigtramp.go @@ -0,0 +1,773 @@ +package proc + +import ( + "encoding/binary" + "errors" + "fmt" + "unsafe" + + "github.com/go-delve/delve/pkg/dwarf/op" + "github.com/go-delve/delve/pkg/dwarf/regnum" + "github.com/go-delve/delve/pkg/logflags" +) + +// readSigtrampgoContext reads runtime.sigtrampgo context at the specified address +func (it *stackIterator) readSigtrampgoContext() (*op.DwarfRegisters, error) { + logger := logflags.DebuggerLogger() + scope := FrameToScope(it.target, it.mem, it.g, it.frame) + bi := it.bi + + findvar := func(name string) *Variable { + vars, _ := scope.Locals(0) + for i := range vars { + if vars[i].Name == name { + return vars[i] + } + } + return nil + } + + deref := func(v *Variable) (uint64, error) { + v.loadValue(loadSingleValue) + if v.Unreadable != nil { + return 0, fmt.Errorf("could not dereference %s: %v", v.Name, v.Unreadable) + } + if len(v.Children) < 1 { + return 0, fmt.Errorf("could not dereference %s (no children?)", v.Name) + } + logger.Debugf("%s address is %#x", v.Name, v.Children[0].Addr) + return v.Children[0].Addr, nil + } + + getctxaddr := func() (uint64, error) { + ctxvar := findvar("ctx") + if ctxvar == nil { + return 0, errors.New("ctx variable not found") + } + addr, err := deref(ctxvar) + if err != nil { + return 0, err + } + return addr, nil + } + + switch bi.GOOS { + case "windows": + epvar := findvar("ep") + if epvar == nil { + return nil, errors.New("ep variable not found") + } + epaddr, err := deref(epvar) + if err != nil { + return nil, err + } + switch bi.Arch.Name { + case "amd64": + return sigtrampContextWindowsAMD64(it.mem, epaddr) + case "arm64": + return sigtrampContextWindowsARM64(it.mem, epaddr) + default: + return nil, errors.New("not implemented") + } + case "linux": + addr, err := getctxaddr() + if err != nil { + return nil, err + } + + switch bi.Arch.Name { + case "386": + return sigtrampContextLinux386(it.mem, addr) + case "amd64": + return sigtrampContextLinuxAMD64(it.mem, addr) + case "arm64": + return sigtrampContextLinuxARM64(it.mem, addr) + default: + return nil, errors.New("not implemented") + } + case "freebsd": + addr, err := getctxaddr() + if err != nil { + return nil, err + } + return sigtrampContextFreebsdAMD64(it.mem, addr) + case "darwin": + addr, err := getctxaddr() + if err != nil { + return nil, err + } + switch bi.Arch.Name { + case "amd64": + return sigtrampContextDarwinAMD64(it.mem, addr) + case "arm64": + return 
sigtrampContextDarwinARM64(it.mem, addr)
+		default:
+			return nil, errors.New("not implemented")
+		}
+	default:
+		return nil, errors.New("not implemented")
+	}
+}
+
+func sigtrampContextLinuxAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
+	type stackt struct {
+		ss_sp     uint64
+		ss_flags  int32
+		pad_cgo_0 [4]byte
+		ss_size   uintptr
+	}
+
+	type mcontext struct {
+		r8          uint64
+		r9          uint64
+		r10         uint64
+		r11         uint64
+		r12         uint64
+		r13         uint64
+		r14         uint64
+		r15         uint64
+		rdi         uint64
+		rsi         uint64
+		rbp         uint64
+		rbx         uint64
+		rdx         uint64
+		rax         uint64
+		rcx         uint64
+		rsp         uint64
+		rip         uint64
+		eflags      uint64
+		cs          uint16
+		gs          uint16
+		fs          uint16
+		__pad0      uint16
+		err         uint64
+		trapno      uint64
+		oldmask     uint64
+		cr2         uint64
+		fpstate     uint64 // pointer
+		__reserved1 [8]uint64
+	}
+
+	type fpxreg struct {
+		significand [4]uint16
+		exponent    uint16
+		padding     [3]uint16
+	}
+
+	type fpstate struct {
+		cwd       uint16
+		swd       uint16
+		ftw       uint16
+		fop       uint16
+		rip       uint64
+		rdp       uint64
+		mxcsr     uint32
+		mxcr_mask uint32
+		_st       [8]fpxreg
+		_xmm      [16][4]uint32
+		padding   [24]uint32
+	}
+
+	type ucontext struct {
+		uc_flags     uint64
+		uc_link      uint64
+		uc_stack     stackt
+		uc_mcontext  mcontext
+		uc_sigmask   [16]uint64
+		__fpregs_mem fpstate
+	}
+
+	buf := make([]byte, unsafe.Sizeof(ucontext{}))
+	_, err := mem.ReadMemory(buf, addr)
+	if err != nil {
+		return nil, err
+	}
+	regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext)
+	dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
+	dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(regs.r8)
+	dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(regs.r9)
+	dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(regs.r10)
+	dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(regs.r11)
+	dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(regs.r12)
+	dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(regs.r13)
+	dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(regs.r14)
+	dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(regs.r15)
+	dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(regs.rdi)
+	dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(regs.rsi)
+	dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(regs.rbp)
+	dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(regs.rbx)
+	dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(regs.rdx)
+	dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(regs.rax)
+	dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(regs.rcx)
+	dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(regs.rsp)
+	dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(regs.rip)
+	dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(regs.eflags)
+	dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(uint64(regs.cs))
+	dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(regs.gs))
+	dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(regs.fs))
+	return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
+}
+
+func sigtrampContextLinux386(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
+	type sigcontext struct {
+		gs            uint16
+		__gsh         uint16
+		fs            uint16
+		__fsh         uint16
+		es            uint16
+		__esh         uint16
+		ds            uint16
+		__dsh         uint16
+		edi           uint32
+		esi           uint32
+		ebp           uint32
+		esp           uint32
+		ebx           uint32
+		edx           uint32
+		ecx           uint32
+		eax           uint32
+		trapno        uint32
+		err           uint32
+		eip           uint32
+		cs            uint16
+		__csh         uint16
+		eflags        uint32
+		esp_at_signal uint32
+		ss            uint16
+		__ssh         uint16
+		fpstate       uint32 // pointer
+		oldmask       uint32
+		cr2           uint32
+	}
+
+	type stackt struct {
+
ss_sp uint32 // pointer + ss_flags int32 + ss_size uint32 + } + + type ucontext struct { + uc_flags uint32 + uc_link uint32 // pointer + uc_stack stackt + uc_mcontext sigcontext + uc_sigmask uint32 + } + + buf := make([]byte, unsafe.Sizeof(ucontext{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return nil, err + } + regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext) + dregs := make([]*op.DwarfRegister, regnum.I386MaxRegNum()+1) + dregs[regnum.I386_Gs] = op.DwarfRegisterFromUint64(uint64(regs.gs)) + dregs[regnum.I386_Fs] = op.DwarfRegisterFromUint64(uint64(regs.fs)) + dregs[regnum.I386_Es] = op.DwarfRegisterFromUint64(uint64(regs.es)) + dregs[regnum.I386_Ds] = op.DwarfRegisterFromUint64(uint64(regs.ds)) + dregs[regnum.I386_Edi] = op.DwarfRegisterFromUint64(uint64(regs.edi)) + dregs[regnum.I386_Esi] = op.DwarfRegisterFromUint64(uint64(regs.esi)) + dregs[regnum.I386_Ebp] = op.DwarfRegisterFromUint64(uint64(regs.ebp)) + dregs[regnum.I386_Esp] = op.DwarfRegisterFromUint64(uint64(regs.esp)) + dregs[regnum.I386_Ebx] = op.DwarfRegisterFromUint64(uint64(regs.ebx)) + dregs[regnum.I386_Edx] = op.DwarfRegisterFromUint64(uint64(regs.edx)) + dregs[regnum.I386_Ecx] = op.DwarfRegisterFromUint64(uint64(regs.ecx)) + dregs[regnum.I386_Eax] = op.DwarfRegisterFromUint64(uint64(regs.eax)) + dregs[regnum.I386_Eip] = op.DwarfRegisterFromUint64(uint64(regs.eip)) + dregs[regnum.I386_Cs] = op.DwarfRegisterFromUint64(uint64(regs.cs)) + dregs[regnum.I386_Ss] = op.DwarfRegisterFromUint64(uint64(regs.ss)) + return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.I386_Eip, regnum.I386_Esp, regnum.I386_Ebp, 0), nil +} + +func sigtrampContextLinuxARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) { + type sigcontext struct { + fault_address uint64 + regs [31]uint64 + sp uint64 + pc uint64 + pstate uint64 + _pad [8]byte + __reserved [4096]byte + } + + type stackt struct { + ss_sp uint64 // pointer + ss_flags int32 + pad_cgo_0 [4]byte + ss_size uint64 + } + + type ucontext struct { + uc_flags uint64 + uc_link uint64 // pointer + uc_stack stackt + uc_sigmask uint64 + _pad [(1024 - 64) / 8]byte + _pad2 [8]byte + uc_mcontext sigcontext + } + + buf := make([]byte, unsafe.Sizeof(ucontext{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return nil, err + } + regs := &(((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext) + dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1) + for i := range regs.regs { + dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(regs.regs[i]) + } + dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(regs.sp) + dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(regs.pc) + return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil +} + +func sigtrampContextFreebsdAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) { + type mcontext struct { + mc_onstack uint64 + mc_rdi uint64 + mc_rsi uint64 + mc_rdx uint64 + mc_rcx uint64 + mc_r8 uint64 + mc_r9 uint64 + mc_rax uint64 + mc_rbx uint64 + mc_rbp uint64 + mc_r10 uint64 + mc_r11 uint64 + mc_r12 uint64 + mc_r13 uint64 + mc_r14 uint64 + mc_r15 uint64 + mc_trapno uint32 + mc_fs uint16 + mc_gs uint16 + mc_addr uint64 + mc_flags uint32 + mc_es uint16 + mc_ds uint16 + mc_err uint64 + mc_rip uint64 + mc_cs uint64 + mc_rflags uint64 + mc_rsp uint64 + mc_ss uint64 + mc_len uint64 + mc_fpformat uint64 + mc_ownedfp uint64 + mc_fpstate [64]uint64 + mc_fsbase uint64 + mc_gsbase uint64 + mc_xfpustate uint64 + 
mc_xfpustate_len uint64 + mc_spare [4]uint64 + } + + type ucontext struct { + uc_sigmask struct { + __bits [4]uint32 + } + uc_mcontext mcontext + uc_link uint64 // pointer + uc_stack struct { + ss_sp uintptr + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte + } + uc_flags int32 + __spare__ [4]int32 + pad_cgo_0 [12]byte + } + + buf := make([]byte, unsafe.Sizeof(ucontext{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return nil, err + } + mctxt := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext + + dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1) + + dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(mctxt.mc_rdi) + dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(mctxt.mc_rsi) + dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(mctxt.mc_rdx) + dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(mctxt.mc_rcx) + dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(mctxt.mc_r8) + dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(mctxt.mc_r9) + dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(mctxt.mc_rax) + dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(mctxt.mc_rbx) + dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(mctxt.mc_rbp) + dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(mctxt.mc_r10) + dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(mctxt.mc_r11) + dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(mctxt.mc_r12) + dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(mctxt.mc_r13) + dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(mctxt.mc_r14) + dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(mctxt.mc_r15) + dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_fs)) + dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_gs)) + dregs[regnum.AMD64_Es] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_es)) + dregs[regnum.AMD64_Ds] = op.DwarfRegisterFromUint64(uint64(mctxt.mc_ds)) + dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(mctxt.mc_rip) + dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(mctxt.mc_cs) + dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(mctxt.mc_rflags) + dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(mctxt.mc_rsp) + dregs[regnum.AMD64_Ss] = op.DwarfRegisterFromUint64(mctxt.mc_ss) + + return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil +} + +func sigtrampContextFromExceptionPointers(mem MemoryReader, addr uint64) (uint64, error) { + type exceptionpointers struct { + record uint64 // pointer + context uint64 // pointer + } + buf := make([]byte, unsafe.Sizeof(exceptionpointers{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return 0, err + } + return ((*exceptionpointers)(unsafe.Pointer(&buf[0]))).context, nil + +} + +func sigtrampContextWindowsAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) { + type context struct { + p1home uint64 + p2home uint64 + p3home uint64 + p4home uint64 + p5home uint64 + p6home uint64 + contextflags uint32 + mxcsr uint32 + segcs uint16 + segds uint16 + seges uint16 + segfs uint16 + seggs uint16 + segss uint16 + eflags uint32 + dr0 uint64 + dr1 uint64 + dr2 uint64 + dr3 uint64 + dr6 uint64 + dr7 uint64 + rax uint64 + rcx uint64 + rdx uint64 + rbx uint64 + rsp uint64 + rbp uint64 + rsi uint64 + rdi uint64 + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rip uint64 + anon0 [512]byte + vectorregister [26]struct { + low uint64 + high int64 + } + vectorcontrol uint64 + 
debugcontrol         uint64
+		lastbranchtorip      uint64
+		lastbranchfromrip    uint64
+		lastexceptiontorip   uint64
+		lastexceptionfromrip uint64
+	}
+
+	ctxtaddr, err := sigtrampContextFromExceptionPointers(mem, addr)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]byte, unsafe.Sizeof(context{}))
+	_, err = mem.ReadMemory(buf, ctxtaddr)
+	if err != nil {
+		return nil, fmt.Errorf("could not read context: %v", err)
+	}
+	ctxt := (*context)(unsafe.Pointer(&buf[0]))
+
+	dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1)
+
+	dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(uint64(ctxt.segcs))
+	dregs[regnum.AMD64_Ds] = op.DwarfRegisterFromUint64(uint64(ctxt.segds))
+	dregs[regnum.AMD64_Es] = op.DwarfRegisterFromUint64(uint64(ctxt.seges))
+	dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(uint64(ctxt.segfs))
+	dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(uint64(ctxt.seggs))
+	dregs[regnum.AMD64_Ss] = op.DwarfRegisterFromUint64(uint64(ctxt.segss))
+	dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(uint64(ctxt.eflags))
+	dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(ctxt.rax)
+	dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(ctxt.rcx)
+	dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(ctxt.rdx)
+	dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(ctxt.rbx)
+	dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(ctxt.rsp)
+	dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(ctxt.rbp)
+	dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(ctxt.rsi)
+	dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(ctxt.rdi)
+	dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(ctxt.r8)
+	dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(ctxt.r9)
+	dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(ctxt.r10)
+	dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(ctxt.r11)
+	dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(ctxt.r12)
+	dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(ctxt.r13)
+	dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(ctxt.r14)
+	dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(ctxt.r15)
+	dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(ctxt.rip)
+
+	return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil
+}
+
+func sigtrampContextWindowsARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
+	type context struct {
+		contextflags uint32
+		cpsr         uint32
+		x            [31]uint64 // fp is x[29], lr is x[30]
+		xsp          uint64
+		pc           uint64
+		v            [32]struct {
+			low  uint64
+			high int64
+		}
+		fpcr uint32
+		fpsr uint32
+		bcr  [8]uint32
+		bvr  [8]uint64
+		wcr  [2]uint32
+		wvr  [2]uint64
+	}
+
+	ctxtaddr, err := sigtrampContextFromExceptionPointers(mem, addr)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]byte, unsafe.Sizeof(context{}))
+	_, err = mem.ReadMemory(buf, ctxtaddr)
+	if err != nil {
+		return nil, fmt.Errorf("could not read context: %v", err)
+	}
+	ctxt := (*context)(unsafe.Pointer(&buf[0]))
+
+	dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1)
+	for i := range ctxt.x {
+		dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(ctxt.x[i])
+	}
+	dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(ctxt.xsp)
+	dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(ctxt.pc)
+	return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil
+}
+
+func sigtrampContextDarwinAMD64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) {
+	type ucontext struct {
+		uc_onstack int32
+		uc_sigmask
uint32 + uc_stack struct { + ss_sp uint64 // pointer + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte + } + uc_link uint64 // pointer + uc_mcsize uint64 + uc_mcontext uint64 // pointer + } + + type regmmst struct { + mmst_reg [10]int8 + mmst_rsrv [6]int8 + } + + type regxmm struct { + xmm_reg [16]int8 + } + + type floatstate64 struct { + fpu_reserved [2]int32 + fpu_fcw [2]byte + fpu_fsw [2]byte + fpu_ftw uint8 + fpu_rsrv1 uint8 + fpu_fop uint16 + fpu_ip uint32 + fpu_cs uint16 + fpu_rsrv2 uint16 + fpu_dp uint32 + fpu_ds uint16 + fpu_rsrv3 uint16 + fpu_mxcsr uint32 + fpu_mxcsrmask uint32 + fpu_stmm0 regmmst + fpu_stmm1 regmmst + fpu_stmm2 regmmst + fpu_stmm3 regmmst + fpu_stmm4 regmmst + fpu_stmm5 regmmst + fpu_stmm6 regmmst + fpu_stmm7 regmmst + fpu_xmm0 regxmm + fpu_xmm1 regxmm + fpu_xmm2 regxmm + fpu_xmm3 regxmm + fpu_xmm4 regxmm + fpu_xmm5 regxmm + fpu_xmm6 regxmm + fpu_xmm7 regxmm + fpu_xmm8 regxmm + fpu_xmm9 regxmm + fpu_xmm10 regxmm + fpu_xmm11 regxmm + fpu_xmm12 regxmm + fpu_xmm13 regxmm + fpu_xmm14 regxmm + fpu_xmm15 regxmm + fpu_rsrv4 [96]int8 + fpu_reserved1 int32 + } + + type regs64 struct { + rax uint64 + rbx uint64 + rcx uint64 + rdx uint64 + rdi uint64 + rsi uint64 + rbp uint64 + rsp uint64 + r8 uint64 + r9 uint64 + r10 uint64 + r11 uint64 + r12 uint64 + r13 uint64 + r14 uint64 + r15 uint64 + rip uint64 + rflags uint64 + cs uint64 + fs uint64 + gs uint64 + } + + type mcontext64 struct { + es struct { + trapno uint16 + cpu uint16 + err uint32 + faultvaddr uint64 + } + ss regs64 + fs floatstate64 + pad_cgo_0 [4]byte + } + + buf := make([]byte, unsafe.Sizeof(ucontext{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return nil, err + } + mctxtaddr := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext + + buf = make([]byte, unsafe.Sizeof(mcontext64{})) + _, err = mem.ReadMemory(buf, mctxtaddr) + if err != nil { + return nil, err + } + + ss := ((*mcontext64)(unsafe.Pointer(&buf[0]))).ss + dregs := make([]*op.DwarfRegister, regnum.AMD64MaxRegNum()+1) + dregs[regnum.AMD64_Rax] = op.DwarfRegisterFromUint64(ss.rax) + dregs[regnum.AMD64_Rbx] = op.DwarfRegisterFromUint64(ss.rbx) + dregs[regnum.AMD64_Rcx] = op.DwarfRegisterFromUint64(ss.rcx) + dregs[regnum.AMD64_Rdx] = op.DwarfRegisterFromUint64(ss.rdx) + dregs[regnum.AMD64_Rdi] = op.DwarfRegisterFromUint64(ss.rdi) + dregs[regnum.AMD64_Rsi] = op.DwarfRegisterFromUint64(ss.rsi) + dregs[regnum.AMD64_Rbp] = op.DwarfRegisterFromUint64(ss.rbp) + dregs[regnum.AMD64_Rsp] = op.DwarfRegisterFromUint64(ss.rsp) + dregs[regnum.AMD64_R8] = op.DwarfRegisterFromUint64(ss.r8) + dregs[regnum.AMD64_R9] = op.DwarfRegisterFromUint64(ss.r9) + dregs[regnum.AMD64_R10] = op.DwarfRegisterFromUint64(ss.r10) + dregs[regnum.AMD64_R11] = op.DwarfRegisterFromUint64(ss.r11) + dregs[regnum.AMD64_R12] = op.DwarfRegisterFromUint64(ss.r12) + dregs[regnum.AMD64_R13] = op.DwarfRegisterFromUint64(ss.r13) + dregs[regnum.AMD64_R14] = op.DwarfRegisterFromUint64(ss.r14) + dregs[regnum.AMD64_R15] = op.DwarfRegisterFromUint64(ss.r15) + dregs[regnum.AMD64_Rip] = op.DwarfRegisterFromUint64(ss.rip) + dregs[regnum.AMD64_Rflags] = op.DwarfRegisterFromUint64(ss.rflags) + dregs[regnum.AMD64_Cs] = op.DwarfRegisterFromUint64(ss.cs) + dregs[regnum.AMD64_Fs] = op.DwarfRegisterFromUint64(ss.fs) + dregs[regnum.AMD64_Gs] = op.DwarfRegisterFromUint64(ss.gs) + + return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.AMD64_Rip, regnum.AMD64_Rsp, regnum.AMD64_Rbp, 0), nil +} + +func sigtrampContextDarwinARM64(mem MemoryReader, addr uint64) (*op.DwarfRegisters, error) { + 
type ucontext struct { + uc_onstack int32 + uc_sigmask uint32 + uc_stack struct { + ss_sp uint64 // pointer + ss_size uintptr + ss_flags int32 + pad_cgo_0 [4]byte + } + uc_link uint64 // pointer + uc_mcsize uint64 + uc_mcontext uint64 // pointer + } + + type regs64 struct { + x [29]uint64 // registers x0 to x28 + fp uint64 // frame register, x29 + lr uint64 // link register, x30 + sp uint64 // stack pointer, x31 + pc uint64 // program counter + cpsr uint32 // current program status register + __pad uint32 + } + + type mcontext64 struct { + es struct { + far uint64 // virtual fault addr + esr uint32 // exception syndrome + exc uint32 // number of arm exception taken + } + ss regs64 + ns struct { + v [64]uint64 // actually [32]uint128 + fpsr uint32 + fpcr uint32 + } + } + + buf := make([]byte, unsafe.Sizeof(ucontext{})) + _, err := mem.ReadMemory(buf, addr) + if err != nil { + return nil, err + } + mctxtaddr := ((*ucontext)(unsafe.Pointer(&buf[0]))).uc_mcontext + + buf = make([]byte, unsafe.Sizeof(mcontext64{})) + _, err = mem.ReadMemory(buf, mctxtaddr) + if err != nil { + return nil, err + } + + ss := ((*mcontext64)(unsafe.Pointer(&buf[0]))).ss + dregs := make([]*op.DwarfRegister, regnum.ARM64MaxRegNum()+1) + for i := range ss.x { + dregs[regnum.ARM64_X0+i] = op.DwarfRegisterFromUint64(ss.x[i]) + } + dregs[regnum.ARM64_BP] = op.DwarfRegisterFromUint64(ss.fp) + dregs[regnum.ARM64_LR] = op.DwarfRegisterFromUint64(ss.lr) + dregs[regnum.ARM64_SP] = op.DwarfRegisterFromUint64(ss.sp) + dregs[regnum.ARM64_PC] = op.DwarfRegisterFromUint64(ss.pc) + return op.NewDwarfRegisters(0, dregs, binary.LittleEndian, regnum.ARM64_PC, regnum.ARM64_SP, regnum.ARM64_BP, regnum.ARM64_LR), nil +} diff --git a/pkg/proc/stackwatch.go b/pkg/proc/stackwatch.go index 0e87427299..64f9dc027b 100644 --- a/pkg/proc/stackwatch.go +++ b/pkg/proc/stackwatch.go @@ -33,7 +33,7 @@ func (t *Target) setStackWatchBreakpoints(scope *EvalScope, watchpoint *Breakpoi return true, nil } - topframe, retframe, err := topframe(scope.g, nil) + topframe, retframe, err := topframe(t, scope.g, nil) if err != nil { return err } diff --git a/pkg/proc/target.go b/pkg/proc/target.go index 0a77344852..553abd7972 100644 --- a/pkg/proc/target.go +++ b/pkg/proc/target.go @@ -396,18 +396,26 @@ func (t *Target) createUnrecoveredPanicBreakpoint() { // createFatalThrowBreakpoint creates the a breakpoint as runtime.fatalthrow. 
func (t *Target) createFatalThrowBreakpoint() { - fatalpcs, err := FindFunctionLocation(t.Process, "runtime.throw", 0) - if err == nil { - bp, err := t.SetBreakpoint(fatalThrowID, fatalpcs[0], UserBreakpoint, nil) + setFatalThrow := func(pcs []uint64, err error) { if err == nil { - bp.Logical.Name = FatalThrow + bp, err := t.SetBreakpoint(fatalThrowID, pcs[0], UserBreakpoint, nil) + if err == nil { + bp.Logical.Name = FatalThrow + } } } - fatalpcs, err = FindFunctionLocation(t.Process, "runtime.fatal", 0) - if err == nil { - bp, err := t.SetBreakpoint(fatalThrowID, fatalpcs[0], UserBreakpoint, nil) - if err == nil { - bp.Logical.Name = FatalThrow + setFatalThrow(FindFunctionLocation(t.Process, "runtime.throw", 0)) + setFatalThrow(FindFunctionLocation(t.Process, "runtime.fatal", 0)) + setFatalThrow(FindFunctionLocation(t.Process, "runtime.winthrow", 0)) + // TODO: replace this with runtime.fatalpanic when associated CL gets merged + if fn := t.BinInfo().lookupOneFunc("runtime.sighandler"); fn != nil { + text, _ := Disassemble(t.Memory(), nil, t.Breakpoints(), t.BinInfo(), fn.Entry, fn.End) + for _, instr := range text { + if instr.IsCall() { + if instr.DestLoc != nil && instr.DestLoc.Fn != nil && instr.DestLoc.Fn.Name == "runtime.exit" { + setFatalThrow([]uint64{instr.Loc.PC}, nil) + } + } } } } diff --git a/pkg/proc/target_exec.go b/pkg/proc/target_exec.go index ed9da60e00..41c926446f 100644 --- a/pkg/proc/target_exec.go +++ b/pkg/proc/target_exec.go @@ -469,7 +469,7 @@ func (grp *TargetGroup) StepOut() error { selg := dbp.SelectedGoroutine() curthread := dbp.CurrentThread() - topframe, retframe, err := topframe(selg, curthread) + topframe, retframe, err := topframe(dbp, selg, curthread) if err != nil { return err } @@ -511,7 +511,7 @@ func (grp *TargetGroup) StepOut() error { } if topframe.Ret != 0 { - topframe, retframe := skipAutogeneratedWrappersOut(selg, curthread, &topframe, &retframe) + topframe, retframe := skipAutogeneratedWrappersOut(grp.Selected, selg, curthread, &topframe, &retframe) retFrameCond := astutil.And(sameGCond, frameoffCondition(retframe)) bp, err := allowDuplicateBreakpoint(dbp.SetBreakpoint(0, retframe.Current.PC, NextBreakpoint, retFrameCond)) if err != nil { @@ -600,7 +600,7 @@ func next(dbp *Target, stepInto, inlinedStepOut bool) error { backward := dbp.recman.GetDirection() == Backward selg := dbp.SelectedGoroutine() curthread := dbp.CurrentThread() - topframe, retframe, err := topframe(selg, curthread) + topframe, retframe, err := topframe(dbp, selg, curthread) if err != nil { return err } @@ -748,7 +748,7 @@ func next(dbp *Target, stepInto, inlinedStepOut bool) error { } if !topframe.Inlined { - topframe, retframe := skipAutogeneratedWrappersOut(selg, curthread, &topframe, &retframe) + topframe, retframe := skipAutogeneratedWrappersOut(dbp, selg, curthread, &topframe, &retframe) retFrameCond := astutil.And(sameGCond, frameoffCondition(retframe)) // Add a breakpoint on the return address for the current frame. @@ -1031,7 +1031,7 @@ func skipAutogeneratedWrappersIn(p Process, startfn *Function, startpc uint64) ( // step out breakpoint. 
// See genwrapper in: $GOROOT/src/cmd/compile/internal/gc/subr.go // It also skips runtime.deferreturn frames (which are only ever on the stack on Go 1.18 or later) -func skipAutogeneratedWrappersOut(g *G, thread Thread, startTopframe, startRetframe *Stackframe) (topframe, retframe *Stackframe) { +func skipAutogeneratedWrappersOut(tgt *Target, g *G, thread Thread, startTopframe, startRetframe *Stackframe) (topframe, retframe *Stackframe) { topframe, retframe = startTopframe, startRetframe if startTopframe.Ret == 0 { return @@ -1049,9 +1049,9 @@ func skipAutogeneratedWrappersOut(g *G, thread Thread, startTopframe, startRetfr var err error var frames []Stackframe if g == nil { - frames, err = ThreadStacktrace(thread, maxSkipAutogeneratedWrappers) + frames, err = ThreadStacktrace(tgt, thread, maxSkipAutogeneratedWrappers) } else { - frames, err = g.Stacktrace(maxSkipAutogeneratedWrappers, 0) + frames, err = GoroutineStacktrace(tgt, g, maxSkipAutogeneratedWrappers, 0) } if err != nil { return @@ -1147,9 +1147,9 @@ func stepOutReverse(p *Target, topframe, retframe Stackframe, sameGCond ast.Expr var frames []Stackframe if selg == nil { - frames, err = ThreadStacktrace(curthread, 3) + frames, err = ThreadStacktrace(p, curthread, 3) } else { - frames, err = selg.Stacktrace(3, 0) + frames, err = GoroutineStacktrace(p, selg, 3, 0) } if err != nil { return err diff --git a/pkg/proc/test/support.go b/pkg/proc/test/support.go index c9bd0e3c56..d06aba58a3 100644 --- a/pkg/proc/test/support.go +++ b/pkg/proc/test/support.go @@ -364,6 +364,9 @@ var hasCgo = func() bool { if strings.TrimSpace(string(out)) != "1" { return false } + if runtime.GOOS == "freebsd" { + return true + } _, err = exec.LookPath("gcc") return err == nil }() diff --git a/pkg/proc/threads.go b/pkg/proc/threads.go index 8b55e4c7a8..df83a7af5a 100644 --- a/pkg/proc/threads.go +++ b/pkg/proc/threads.go @@ -67,14 +67,14 @@ func (t *CommonThread) ReturnValues(cfg LoadConfig) []*Variable { } // topframe returns the two topmost frames of g, or thread if g is nil. -func topframe(g *G, thread Thread) (Stackframe, Stackframe, error) { +func topframe(tgt *Target, g *G, thread Thread) (Stackframe, Stackframe, error) { var frames []Stackframe var err error if g == nil { - frames, err = ThreadStacktrace(thread, 1) + frames, err = ThreadStacktrace(tgt, thread, 1) } else { - frames, err = g.Stacktrace(1, StacktraceReadDefers) + frames, err = GoroutineStacktrace(tgt, g, 1, StacktraceReadDefers) } if err != nil { return Stackframe{}, Stackframe{}, err diff --git a/pkg/proc/variables.go b/pkg/proc/variables.go index 65f5040dc1..7b3e3652db 100644 --- a/pkg/proc/variables.go +++ b/pkg/proc/variables.go @@ -497,7 +497,7 @@ func (g *G) Defer() *Defer { // UserCurrent returns the location the users code is at, // or was at before entering a runtime function. func (g *G) UserCurrent() Location { - it, err := g.stackIterator(0) + it, err := goroutineStackIterator(nil, g, 0) if err != nil { return g.CurrentLoc } diff --git a/pkg/proc/variables_fuzz_test.go b/pkg/proc/variables_fuzz_test.go index 07e28d3736..55ec7a26df 100644 --- a/pkg/proc/variables_fuzz_test.go +++ b/pkg/proc/variables_fuzz_test.go @@ -127,7 +127,7 @@ func doFuzzEvalExpressionSetup(f *testing.F) { // 3. 
Run all the test cases on the core file, register which memory addresses are read - frames, err := c.SelectedGoroutine().Stacktrace(2, 0) + frames, err := proc.GoroutineStacktrace(c, c.SelectedGoroutine(), 2, 0) assertNoError(err, f, "Stacktrace") mem := c.Memory() diff --git a/service/debugger/debugger.go b/service/debugger/debugger.go index af5eb5c1b1..ca1ad2aa60 100644 --- a/service/debugger/debugger.go +++ b/service/debugger/debugger.go @@ -1338,7 +1338,7 @@ func (d *Debugger) collectBreakpointInformation(apiThread *api.Thread, thread pr } if bp.Stacktrace > 0 { - rawlocs, err := proc.ThreadStacktrace(thread, bp.Stacktrace) + rawlocs, err := proc.ThreadStacktrace(tgt, thread, bp.Stacktrace) if err != nil { return err } @@ -1757,9 +1757,9 @@ func (d *Debugger) Stacktrace(goroutineID int64, depth int, opts api.StacktraceO } if g == nil { - return proc.ThreadStacktrace(d.target.Selected.CurrentThread(), depth) + return proc.ThreadStacktrace(d.target.Selected, d.target.Selected.CurrentThread(), depth) } else { - return g.Stacktrace(depth, proc.StacktraceOptions(opts)) + return proc.GoroutineStacktrace(d.target.Selected, g, depth, proc.StacktraceOptions(opts)) } }