From 892e7563944648a3ba3171f9fa376e4a8181ac81 Mon Sep 17 00:00:00 2001
From: Alberto Garcia Hierro
Date: Sat, 14 Sep 2024 17:58:31 +0100
Subject: [PATCH] debug: add trace flight recorder

Use golang.org/x/exp/trace to implement a trace recorder that keeps the
trace in a circular buffer, so that a snapshot of the most recent
execution data can be retrieved at any time. Debug endpoints have been
added under /debug/flight to start and stop the trace, as well as to
set its period.

Due to golang.org/x/exp/trace, the minimum Go version has been bumped
to 1.22.

Signed-off-by: Alberto Garcia Hierro
---
 cmd/buildkitd/debug.go                        |    2 +
 cmd/buildkitd/debug_flight.go                 |   88 +
 control/control.go                            |    3 +
 go.mod                                        |    7 +-
 go.sum                                        |   16 +-
 hack/dockerfiles/docs-dockerfile.Dockerfile   |    2 +-
 vendor/golang.org/x/exp/LICENSE               |   27 +
 vendor/golang.org/x/exp/PATENTS               |   22 +
 vendor/golang.org/x/exp/trace/base.go         |  278 +++
 vendor/golang.org/x/exp/trace/batch.go        |  115 ++
 vendor/golang.org/x/exp/trace/batchcursor.go  |  178 ++
 vendor/golang.org/x/exp/trace/event.go        |  867 +++++++++
 .../golang.org/x/exp/trace/flightrecorder.go  |  367 ++++
 vendor/golang.org/x/exp/trace/gen.bash        |   99 ++
 vendor/golang.org/x/exp/trace/generation.go   |  458 +++++
 .../x/exp/trace/internal/event/event.go       |  106 ++
 .../x/exp/trace/internal/event/go122/event.go |  515 ++++++
 .../exp/trace/internal/event/requirements.go  |   30 +
 .../x/exp/trace/internal/oldtrace/order.go    |  176 ++
 .../x/exp/trace/internal/oldtrace/parser.go   | 1548 +++++++++++++++++
 .../x/exp/trace/internal/version/version.go   |   75 +
 vendor/golang.org/x/exp/trace/oldtrace.go     |  572 ++++++
 vendor/golang.org/x/exp/trace/order.go        | 1403 +++++++++++++++
 vendor/golang.org/x/exp/trace/parser.go       |   83 +
 vendor/golang.org/x/exp/trace/reader.go       |  242 +++
 vendor/golang.org/x/exp/trace/resources.go    |  278 +++
 vendor/golang.org/x/exp/trace/value.go        |   57 +
 vendor/golang.org/x/mod/LICENSE               |    4 +-
 vendor/golang.org/x/sync/LICENSE              |    4 +-
 vendor/modules.txt                            |   13 +-
 30 files changed, 7616 insertions(+), 19 deletions(-)
 create mode 100644 cmd/buildkitd/debug_flight.go
 create mode 100644 vendor/golang.org/x/exp/LICENSE
 create mode 100644 vendor/golang.org/x/exp/PATENTS
 create mode 100644 vendor/golang.org/x/exp/trace/base.go
 create mode 100644 vendor/golang.org/x/exp/trace/batch.go
 create mode 100644 vendor/golang.org/x/exp/trace/batchcursor.go
 create mode 100644 vendor/golang.org/x/exp/trace/event.go
 create mode 100644 vendor/golang.org/x/exp/trace/flightrecorder.go
 create mode 100644 vendor/golang.org/x/exp/trace/gen.bash
 create mode 100644 vendor/golang.org/x/exp/trace/generation.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/event/event.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/event/go122/event.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/event/requirements.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/oldtrace/order.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/oldtrace/parser.go
 create mode 100644 vendor/golang.org/x/exp/trace/internal/version/version.go
 create mode 100644 vendor/golang.org/x/exp/trace/oldtrace.go
 create mode 100644 vendor/golang.org/x/exp/trace/order.go
 create mode 100644 vendor/golang.org/x/exp/trace/parser.go
 create mode 100644 vendor/golang.org/x/exp/trace/reader.go
 create mode 100644 vendor/golang.org/x/exp/trace/resources.go
 create mode 100644 vendor/golang.org/x/exp/trace/value.go

diff --git a/cmd/buildkitd/debug.go b/cmd/buildkitd/debug.go
index c818bbc06be2..ad6785384a01 100644
--- a/cmd/buildkitd/debug.go
+++ b/cmd/buildkitd/debug.go
@@ -31,6 +31,8 @@ func setupDebugHandlers(addr string) error {
 	m.Handle("/metrics", promhttp.Handler())
 
+	setupDebugFlight(m)
+
 	// setting debugaddr is opt-in. permission is defined by listener address
 	trace.AuthRequest = func(_ *http.Request) (bool, bool) {
 		return true, true
diff --git a/cmd/buildkitd/debug_flight.go b/cmd/buildkitd/debug_flight.go
new file mode 100644
index 000000000000..0b4f5f653266
--- /dev/null
+++ b/cmd/buildkitd/debug_flight.go
@@ -0,0 +1,88 @@
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"golang.org/x/exp/trace"
+)
+
+type flightRecorder struct {
+	mu       sync.Mutex
+	recorder *trace.FlightRecorder
+}
+
+func newFlightRecorder() *flightRecorder {
+	dbg := &flightRecorder{
+		recorder: trace.NewFlightRecorder(),
+	}
+	return dbg
+}
+
+func (r *flightRecorder) StartTrace(w http.ResponseWriter, req *http.Request) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.recorder.Enabled() {
+		http.Error(w, "flight recorder is already running", http.StatusConflict)
+		return
+	}
+	if err := r.recorder.Start(); err != nil {
+		http.Error(w, fmt.Sprintf("could not start flight recorder: %s", err), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (r *flightRecorder) StopTrace(w http.ResponseWriter, req *http.Request) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if !r.recorder.Enabled() {
+		http.Error(w, "flight recorder is not running", http.StatusConflict)
+		return
+	}
+	if err := r.recorder.Stop(); err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+}
+
+func (r *flightRecorder) SetTracePeriod(w http.ResponseWriter, req *http.Request) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.recorder.Enabled() {
+		http.Error(w, "flight recorder is running, stop it to change its period", http.StatusPreconditionFailed)
+		return
+	}
+	periodValue := req.FormValue("period")
+	period, err := time.ParseDuration(periodValue)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("invalid flight recorder period: %s", err), http.StatusBadRequest)
+		return
+	}
+	r.recorder.SetPeriod(period)
+}
+
+func (r *flightRecorder) Trace(w http.ResponseWriter, req *http.Request) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	w.Header().Set("Content-Type", "application/octet-stream")
+	w.Header().Set("Content-Disposition", `attachment; filename="trace"`)
+	if _, err := r.recorder.WriteTo(w); err != nil {
+		http.Error(w, fmt.Sprintf("could not write in-flight trace: %s", err), http.StatusInternalServerError)
+	}
+}
+
+func setupDebugFlight(m *http.ServeMux) {
+	r := newFlightRecorder()
+
+	const (
+		flightPattern      = "/debug/flight"
+		flightTracePattern = flightPattern + "/trace"
+	)
+
+	m.HandleFunc("POST "+flightTracePattern+"/start", r.StartTrace)
+	m.HandleFunc("POST "+flightTracePattern+"/stop", r.StopTrace)
+	m.HandleFunc("POST "+flightTracePattern+"/set_period", r.SetTracePeriod)
+	m.HandleFunc("GET "+flightTracePattern, r.Trace)
+}
diff --git a/control/control.go b/control/control.go
index 14b8ee5b1e0e..dfc0b6c9d9b2 100644
--- a/control/control.go
+++ b/control/control.go
@@ -3,6 +3,7 @@ package control
 import (
 	"context"
 	"fmt"
+	"runtime/trace"
 	"strconv"
 	"sync"
 	"sync/atomic"
@@ -343,6 +344,8 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) {
 }
 
 func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
+	defer trace.StartRegion(ctx, "Solve").End()
+	trace.Logf(ctx, "Request", "solve request: %v", req.Ref)
 	atomic.AddInt64(&c.buildCount, 1)
 	defer atomic.AddInt64(&c.buildCount, -1)
diff --git a/go.mod b/go.mod
index
45d6ee9f5385..a9faeaf001e0 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/moby/buildkit -go 1.21.0 +go 1.22.0 require ( github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 @@ -95,9 +95,10 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/proto/otlp v1.0.0 golang.org/x/crypto v0.23.0 - golang.org/x/mod v0.17.0 + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + golang.org/x/mod v0.21.0 golang.org/x/net v0.25.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 golang.org/x/sys v0.22.0 golang.org/x/time v0.3.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 diff --git a/go.sum b/go.sum index 91083621c45d..8f6ec4251b07 100644 --- a/go.sum +++ b/go.sum @@ -450,15 +450,15 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -482,8 +482,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -515,8 +515,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/hack/dockerfiles/docs-dockerfile.Dockerfile b/hack/dockerfiles/docs-dockerfile.Dockerfile index 49d42a7abba5..e16050135940 100644 --- a/hack/dockerfiles/docs-dockerfile.Dockerfile +++ b/hack/dockerfiles/docs-dockerfile.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -ARG GO_VERSION=1.21 +ARG GO_VERSION=1.22 ARG ALPINE_VERSION=3.20 FROM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS golatest diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 000000000000..2a7cf70da6e4 --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
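Note: the flight endpoints added in cmd/buildkitd/debug_flight.go can be driven
with any HTTP client. As a reference, here is a minimal client sketch (not part
of the patch). It assumes buildkitd was started with --debugaddr 127.0.0.1:6060;
the address, the 30s period, and the flight.trace output name are illustrative
assumptions only:

	package main

	import (
		"io"
		"net/http"
		"net/url"
		"os"
	)

	func main() {
		// Assumed debug address; use whatever --debugaddr points at.
		base := "http://127.0.0.1:6060/debug/flight/trace"

		// Choose the buffer window before starting: set_period answers
		// 412 Precondition Failed while the recorder is running.
		// (Error handling elided for brevity.)
		http.PostForm(base+"/set_period", url.Values{"period": {"30s"}})

		// Start recording; a second start answers 409 Conflict.
		http.Post(base+"/start", "", nil)

		// ... reproduce the behavior of interest, then snapshot the
		// circular buffer while the recorder is still running.
		resp, err := http.Get(base)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		out, err := os.Create("flight.trace") // inspect with: go tool trace flight.trace
		if err != nil {
			panic(err)
		}
		defer out.Close()
		if _, err := io.Copy(out, resp.Body); err != nil {
			panic(err)
		}

		// Stop recording once the snapshot is saved.
		http.Post(base+"/stop", "", nil)
	}

The set_period call has to happen before start, and the GET snapshot while the
recorder is running, since the handler turns a WriteTo error into a 500.
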
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 000000000000..733099041f84 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/trace/base.go b/vendor/golang.org/x/exp/trace/base.go new file mode 100644 index 000000000000..d3d5db49d190 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/base.go @@ -0,0 +1,278 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +// This file contains data types that all implementations of the trace format +// parser need to provide to the rest of the package. + +package trace + +import ( + "fmt" + "math" + "strings" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" + "golang.org/x/exp/trace/internal/version" +) + +// maxArgs is the maximum number of arguments for "plain" events, +// i.e. anything that could reasonably be represented as a baseEvent. +// +// TODO(mknyszek): This is only 6 instead of 5 because GoStatusStack +// has 5 arguments and needs to smuggle in a 6th. Figure out a way to +// shrink this in the future. +const maxArgs = 6 + +// timedEventArgs is an array that is able to hold the arguments for any +// timed event. +type timedEventArgs [maxArgs - 1]uint64 + +// baseEvent is the basic unprocessed event. This serves as a common +// fundamental data structure across. +type baseEvent struct { + typ event.Type + time Time + args timedEventArgs +} + +// extra returns a slice representing extra available space in args +// that the parser can use to pass data up into Event. +func (e *baseEvent) extra(v version.Version) []uint64 { + switch v { + case version.Go122: + return e.args[len(go122.Specs()[e.typ].Args)-1:] + } + panic(fmt.Sprintf("unsupported version: go 1.%d", v)) +} + +// evTable contains the per-generation data necessary to +// interpret an individual event. 
+type evTable struct { + freq frequency + strings dataTable[stringID, string] + stacks dataTable[stackID, stack] + pcs map[uint64]frame + + // extraStrings are strings that get generated during + // parsing but haven't come directly from the trace, so + // they don't appear in strings. + extraStrings []string + extraStringIDs map[string]extraStringID + nextExtra extraStringID + + // expData contains extra unparsed data that is accessible + // only to ExperimentEvent via an EventExperimental event. + expData map[event.Experiment]*ExperimentalData +} + +// addExtraString adds an extra string to the evTable and returns +// a unique ID for the string in the table. +func (t *evTable) addExtraString(s string) extraStringID { + if s == "" { + return 0 + } + if t.extraStringIDs == nil { + t.extraStringIDs = make(map[string]extraStringID) + } + if id, ok := t.extraStringIDs[s]; ok { + return id + } + t.nextExtra++ + id := t.nextExtra + t.extraStrings = append(t.extraStrings, s) + t.extraStringIDs[s] = id + return id +} + +// getExtraString returns the extra string for the provided ID. +// The ID must have been produced by addExtraString for this evTable. +func (t *evTable) getExtraString(id extraStringID) string { + if id == 0 { + return "" + } + return t.extraStrings[id-1] +} + +// dataTable is a mapping from EIs to Es. +type dataTable[EI ~uint64, E any] struct { + present []uint8 + dense []E + sparse map[EI]E +} + +// insert tries to add a mapping from id to s. +// +// Returns an error if a mapping for id already exists, regardless +// of whether or not s is the same in content. This should be used +// for validation during parsing. +func (d *dataTable[EI, E]) insert(id EI, data E) error { + if d.sparse == nil { + d.sparse = make(map[EI]E) + } + if existing, ok := d.get(id); ok { + return fmt.Errorf("multiple %Ts with the same ID: id=%d, new=%v, existing=%v", data, id, data, existing) + } + d.sparse[id] = data + return nil +} + +// compactify attempts to compact sparse into dense. +// +// This is intended to be called only once after insertions are done. +func (d *dataTable[EI, E]) compactify() { + if d.sparse == nil || len(d.dense) != 0 { + // Already compactified. + return + } + // Find the range of IDs. + maxID := EI(0) + minID := ^EI(0) + for id := range d.sparse { + if id > maxID { + maxID = id + } + if id < minID { + minID = id + } + } + if maxID >= math.MaxInt { + // We can't create a slice big enough to hold maxID elements + return + } + // We're willing to waste at most 2x memory. + if int(maxID-minID) > max(len(d.sparse), 2*len(d.sparse)) { + return + } + if int(minID) > len(d.sparse) { + return + } + size := int(maxID) + 1 + d.present = make([]uint8, (size+7)/8) + d.dense = make([]E, size) + for id, data := range d.sparse { + d.dense[id] = data + d.present[id/8] |= uint8(1) << (id % 8) + } + d.sparse = nil +} + +// get returns the E for id or false if it doesn't +// exist. This should be used for validation during parsing. +func (d *dataTable[EI, E]) get(id EI) (E, bool) { + if id == 0 { + return *new(E), true + } + if uint64(id) < uint64(len(d.dense)) { + if d.present[id/8]&(uint8(1)<<(id%8)) != 0 { + return d.dense[id], true + } + } else if d.sparse != nil { + if data, ok := d.sparse[id]; ok { + return data, true + } + } + return *new(E), false +} + +// forEach iterates over all ID/value pairs in the data table. 
+func (d *dataTable[EI, E]) forEach(yield func(EI, E) bool) bool { + for id, value := range d.dense { + if d.present[id/8]&(uint8(1)<<(id%8)) == 0 { + continue + } + if !yield(EI(id), value) { + return false + } + } + if d.sparse == nil { + return true + } + for id, value := range d.sparse { + if !yield(id, value) { + return false + } + } + return true +} + +// mustGet returns the E for id or panics if it fails. +// +// This should only be used if id has already been validated. +func (d *dataTable[EI, E]) mustGet(id EI) E { + data, ok := d.get(id) + if !ok { + panic(fmt.Sprintf("expected id %d in %T table", id, data)) + } + return data +} + +// frequency is nanoseconds per timestamp unit. +type frequency float64 + +// mul multiplies an unprocessed to produce a time in nanoseconds. +func (f frequency) mul(t timestamp) Time { + return Time(float64(t) * float64(f)) +} + +// stringID is an index into the string table for a generation. +type stringID uint64 + +// extraStringID is an index into the extra string table for a generation. +type extraStringID uint64 + +// stackID is an index into the stack table for a generation. +type stackID uint64 + +// cpuSample represents a CPU profiling sample captured by the trace. +type cpuSample struct { + schedCtx + time Time + stack stackID +} + +// asEvent produces a complete Event from a cpuSample. It needs +// the evTable from the generation that created it. +// +// We don't just store it as an Event in generation to minimize +// the amount of pointer data floating around. +func (s cpuSample) asEvent(table *evTable) Event { + // TODO(mknyszek): This is go122-specific, but shouldn't be. + // Generalize this in the future. + e := Event{ + table: table, + ctx: s.schedCtx, + base: baseEvent{ + typ: go122.EvCPUSample, + time: s.time, + }, + } + e.base.args[0] = uint64(s.stack) + return e +} + +// stack represents a goroutine stack sample. +type stack struct { + pcs []uint64 +} + +func (s stack) String() string { + var sb strings.Builder + for _, frame := range s.pcs { + fmt.Fprintf(&sb, "\t%#v\n", frame) + } + return sb.String() +} + +// frame represents a single stack frame. +type frame struct { + pc uint64 + funcID stringID + fileID stringID + line uint64 +} diff --git a/vendor/golang.org/x/exp/trace/batch.go b/vendor/golang.org/x/exp/trace/batch.go new file mode 100644 index 000000000000..4d6c530f2827 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/batch.go @@ -0,0 +1,115 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package trace + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" +) + +// timestamp is an unprocessed timestamp. +type timestamp uint64 + +// batch represents a batch of trace events. +// It is unparsed except for its header. 
+type batch struct { + m ThreadID + time timestamp + data []byte + exp event.Experiment +} + +func (b *batch) isStringsBatch() bool { + return b.exp == event.NoExperiment && len(b.data) > 0 && event.Type(b.data[0]) == go122.EvStrings +} + +func (b *batch) isStacksBatch() bool { + return b.exp == event.NoExperiment && len(b.data) > 0 && event.Type(b.data[0]) == go122.EvStacks +} + +func (b *batch) isCPUSamplesBatch() bool { + return b.exp == event.NoExperiment && len(b.data) > 0 && event.Type(b.data[0]) == go122.EvCPUSamples +} + +func (b *batch) isFreqBatch() bool { + return b.exp == event.NoExperiment && len(b.data) > 0 && event.Type(b.data[0]) == go122.EvFrequency +} + +// readBatch reads the next full batch from r. +func readBatch(r interface { + io.Reader + io.ByteReader +}) (batch, uint64, error) { + // Read batch header byte. + b, err := r.ReadByte() + if err != nil { + return batch{}, 0, err + } + if typ := event.Type(b); typ != go122.EvEventBatch && typ != go122.EvExperimentalBatch { + return batch{}, 0, fmt.Errorf("expected batch event, got %s", go122.EventString(typ)) + } + + // Read the experiment of we have one. + exp := event.NoExperiment + if event.Type(b) == go122.EvExperimentalBatch { + e, err := r.ReadByte() + if err != nil { + return batch{}, 0, err + } + exp = event.Experiment(e) + } + + // Read the batch header: gen (generation), thread (M) ID, base timestamp + // for the batch. + gen, err := binary.ReadUvarint(r) + if err != nil { + return batch{}, gen, fmt.Errorf("error reading batch gen: %w", err) + } + m, err := binary.ReadUvarint(r) + if err != nil { + return batch{}, gen, fmt.Errorf("error reading batch M ID: %w", err) + } + ts, err := binary.ReadUvarint(r) + if err != nil { + return batch{}, gen, fmt.Errorf("error reading batch timestamp: %w", err) + } + + // Read in the size of the batch to follow. + size, err := binary.ReadUvarint(r) + if err != nil { + return batch{}, gen, fmt.Errorf("error reading batch size: %w", err) + } + if size > go122.MaxBatchSize { + return batch{}, gen, fmt.Errorf("invalid batch size %d, maximum is %d", size, go122.MaxBatchSize) + } + + // Copy out the batch for later processing. + var data bytes.Buffer + data.Grow(int(size)) + n, err := io.CopyN(&data, r, int64(size)) + if n != int64(size) { + return batch{}, gen, fmt.Errorf("failed to read full batch: read %d but wanted %d", n, size) + } + if err != nil { + return batch{}, gen, fmt.Errorf("copying batch data: %w", err) + } + + // Return the batch. + return batch{ + m: ThreadID(m), + time: timestamp(ts), + data: data.Bytes(), + exp: exp, + }, gen, nil +} diff --git a/vendor/golang.org/x/exp/trace/batchcursor.go b/vendor/golang.org/x/exp/trace/batchcursor.go new file mode 100644 index 000000000000..c3b0f518f7d3 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/batchcursor.go @@ -0,0 +1,178 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. 
+ +//go:build go1.21 + +package trace + +import ( + "cmp" + "encoding/binary" + "fmt" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" +) + +type batchCursor struct { + m ThreadID + lastTs Time + idx int // next index into []batch + dataOff int // next index into batch.data + ev baseEvent // last read event +} + +func (b *batchCursor) nextEvent(batches []batch, freq frequency) (ok bool, err error) { + // Batches should generally always have at least one event, + // but let's be defensive about that and accept empty batches. + for b.idx < len(batches) && len(batches[b.idx].data) == b.dataOff { + b.idx++ + b.dataOff = 0 + b.lastTs = 0 + } + // Have we reached the end of the batches? + if b.idx == len(batches) { + return false, nil + } + // Initialize lastTs if it hasn't been yet. + if b.lastTs == 0 { + b.lastTs = freq.mul(batches[b.idx].time) + } + // Read an event out. + n, tsdiff, err := readTimedBaseEvent(batches[b.idx].data[b.dataOff:], &b.ev) + if err != nil { + return false, err + } + // Complete the timestamp from the cursor's last timestamp. + b.ev.time = freq.mul(tsdiff) + b.lastTs + + // Move the cursor's timestamp forward. + b.lastTs = b.ev.time + + // Move the cursor forward. + b.dataOff += n + return true, nil +} + +func (b *batchCursor) compare(a *batchCursor) int { + return cmp.Compare(b.ev.time, a.ev.time) +} + +// readTimedBaseEvent reads out the raw event data from b +// into e. It does not try to interpret the arguments +// but it does validate that the event is a regular +// event with a timestamp (vs. a structural event). +// +// It requires that the event its reading be timed, which must +// be the case for every event in a plain EventBatch. +func readTimedBaseEvent(b []byte, e *baseEvent) (int, timestamp, error) { + // Get the event type. + typ := event.Type(b[0]) + specs := go122.Specs() + if int(typ) >= len(specs) { + return 0, 0, fmt.Errorf("found invalid event type: %v", typ) + } + e.typ = typ + + // Get spec. + spec := &specs[typ] + if len(spec.Args) == 0 || !spec.IsTimedEvent { + return 0, 0, fmt.Errorf("found event without a timestamp: type=%v", typ) + } + n := 1 + + // Read timestamp diff. + ts, nb := binary.Uvarint(b[n:]) + if nb <= 0 { + return 0, 0, fmt.Errorf("found invalid uvarint for timestamp") + } + n += nb + + // Read the rest of the arguments. + for i := 0; i < len(spec.Args)-1; i++ { + arg, nb := binary.Uvarint(b[n:]) + if nb <= 0 { + return 0, 0, fmt.Errorf("found invalid uvarint") + } + e.args[i] = arg + n += nb + } + return n, timestamp(ts), nil +} + +func heapInsert(heap []*batchCursor, bc *batchCursor) []*batchCursor { + // Add the cursor to the end of the heap. + heap = append(heap, bc) + + // Sift the new entry up to the right place. + heapSiftUp(heap, len(heap)-1) + return heap +} + +func heapUpdate(heap []*batchCursor, i int) { + // Try to sift up. + if heapSiftUp(heap, i) != i { + return + } + // Try to sift down, if sifting up failed. + heapSiftDown(heap, i) +} + +func heapRemove(heap []*batchCursor, i int) []*batchCursor { + // Sift index i up to the root, ignoring actual values. + for i > 0 { + heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2] + i = (i - 1) / 2 + } + // Swap the root with the last element, then remove it. + heap[0], heap[len(heap)-1] = heap[len(heap)-1], heap[0] + heap = heap[:len(heap)-1] + // Sift the root down. 
+ heapSiftDown(heap, 0) + return heap +} + +func heapSiftUp(heap []*batchCursor, i int) int { + for i > 0 && heap[(i-1)/2].ev.time > heap[i].ev.time { + heap[(i-1)/2], heap[i] = heap[i], heap[(i-1)/2] + i = (i - 1) / 2 + } + return i +} + +func heapSiftDown(heap []*batchCursor, i int) int { + for { + m := min3(heap, i, 2*i+1, 2*i+2) + if m == i { + // Heap invariant already applies. + break + } + heap[i], heap[m] = heap[m], heap[i] + i = m + } + return i +} + +func min3(b []*batchCursor, i0, i1, i2 int) int { + minIdx := i0 + minT := maxTime + if i0 < len(b) { + minT = b[i0].ev.time + } + if i1 < len(b) { + if t := b[i1].ev.time; t < minT { + minT = t + minIdx = i1 + } + } + if i2 < len(b) { + if t := b[i2].ev.time; t < minT { + minT = t + minIdx = i2 + } + } + return minIdx +} diff --git a/vendor/golang.org/x/exp/trace/event.go b/vendor/golang.org/x/exp/trace/event.go new file mode 100644 index 000000000000..a3093fdfefbc --- /dev/null +++ b/vendor/golang.org/x/exp/trace/event.go @@ -0,0 +1,867 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package trace + +import ( + "fmt" + "math" + "strings" + "time" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" + "golang.org/x/exp/trace/internal/version" +) + +// EventKind indicates the kind of event this is. +// +// Use this information to obtain a more specific event that +// allows access to more detailed information. +type EventKind uint16 + +const ( + EventBad EventKind = iota + + // EventKindSync is an event that indicates a global synchronization + // point in the trace. At the point of a sync event, the + // trace reader can be certain that all resources (e.g. threads, + // goroutines) that have existed until that point have been enumerated. + EventSync + + // EventMetric is an event that represents the value of a metric at + // a particular point in time. + EventMetric + + // EventLabel attaches a label to a resource. + EventLabel + + // EventStackSample represents an execution sample, indicating what a + // thread/proc/goroutine was doing at a particular point in time via + // its backtrace. + // + // Note: Samples should be considered a close approximation of + // what a thread/proc/goroutine was executing at a given point in time. + // These events may slightly contradict the situation StateTransitions + // describe, so they should only be treated as a best-effort annotation. + EventStackSample + + // EventRangeBegin and EventRangeEnd are a pair of generic events representing + // a special range of time. Ranges are named and scoped to some resource + // (identified via ResourceKind). A range that has begun but has not ended + // is considered active. + // + // EvRangeBegin and EvRangeEnd will share the same name, and an End will always + // follow a Begin on the same instance of the resource. The associated + // resource ID can be obtained from the Event. ResourceNone indicates the + // range is globally scoped. That is, any goroutine/proc/thread can start or + // stop, but only one such range may be active at any given time. + // + // EventRangeActive is like EventRangeBegin, but indicates that the range was + // already active. In this case, the resource referenced may not be in the current + // context. 
+ EventRangeBegin + EventRangeActive + EventRangeEnd + + // EvTaskBegin and EvTaskEnd are a pair of events representing a runtime/trace.Task. + EventTaskBegin + EventTaskEnd + + // EventRegionBegin and EventRegionEnd are a pair of events represent a runtime/trace.Region. + EventRegionBegin + EventRegionEnd + + // EventLog represents a runtime/trace.Log call. + EventLog + + // EventStateTransition represents a state change for some resource. + EventStateTransition + + // EventExperimental is an experimental event that is unvalidated and exposed in a raw form. + // Users are expected to understand the format and perform their own validation. These events + // may always be safely ignored. + EventExperimental +) + +// String returns a string form of the EventKind. +func (e EventKind) String() string { + if int(e) >= len(eventKindStrings) { + return eventKindStrings[0] + } + return eventKindStrings[e] +} + +var eventKindStrings = [...]string{ + EventBad: "Bad", + EventSync: "Sync", + EventMetric: "Metric", + EventLabel: "Label", + EventStackSample: "StackSample", + EventRangeBegin: "RangeBegin", + EventRangeActive: "RangeActive", + EventRangeEnd: "RangeEnd", + EventTaskBegin: "TaskBegin", + EventTaskEnd: "TaskEnd", + EventRegionBegin: "RegionBegin", + EventRegionEnd: "RegionEnd", + EventLog: "Log", + EventStateTransition: "StateTransition", + EventExperimental: "Experimental", +} + +const maxTime = Time(math.MaxInt64) + +// Time is a timestamp in nanoseconds. +// +// It corresponds to the monotonic clock on the platform that the +// trace was taken, and so is possible to correlate with timestamps +// for other traces taken on the same machine using the same clock +// (i.e. no reboots in between). +// +// The actual absolute value of the timestamp is only meaningful in +// relation to other timestamps from the same clock. +// +// BUG: Timestamps coming from traces on Windows platforms are +// only comparable with timestamps from the same trace. Timestamps +// across traces cannot be compared, because the system clock is +// not used as of Go 1.22. +// +// BUG: Traces produced by Go versions 1.21 and earlier cannot be +// compared with timestamps from other traces taken on the same +// machine. This is because the system clock was not used at all +// to collect those timestamps. +type Time int64 + +// Sub subtracts t0 from t, returning the duration in nanoseconds. +func (t Time) Sub(t0 Time) time.Duration { + return time.Duration(int64(t) - int64(t0)) +} + +// Metric provides details about a Metric event. +type Metric struct { + // Name is the name of the sampled metric. + // + // Names follow the same convention as metric names in the + // runtime/metrics package, meaning they include the unit. + // Names that match with the runtime/metrics package represent + // the same quantity. Note that this corresponds to the + // runtime/metrics package for the Go version this trace was + // collected for. + Name string + + // Value is the sampled value of the metric. + // + // The Value's Kind is tied to the name of the metric, and so is + // guaranteed to be the same for metric samples for the same metric. + Value Value +} + +// Label provides details about a Label event. +type Label struct { + // Label is the label applied to some resource. + Label string + + // Resource is the resource to which this label should be applied. + Resource ResourceID +} + +// Range provides details about a Range event. +type Range struct { + // Name is a human-readable name for the range. 
+ // + // This name can be used to identify the end of the range for the resource + // its scoped to, because only one of each type of range may be active on + // a particular resource. The relevant resource should be obtained from the + // Event that produced these details. The corresponding RangeEnd will have + // an identical name. + Name string + + // Scope is the resource that the range is scoped to. + // + // For example, a ResourceGoroutine scope means that the same goroutine + // must have a start and end for the range, and that goroutine can only + // have one range of a particular name active at any given time. The + // ID that this range is scoped to may be obtained via Event.Goroutine. + // + // The ResourceNone scope means that the range is globally scoped. As a + // result, any goroutine/proc/thread may start or end the range, and only + // one such named range may be active globally at any given time. + // + // For RangeBegin and RangeEnd events, this will always reference some + // resource ID in the current execution context. For RangeActive events, + // this may reference a resource not in the current context. Prefer Scope + // over the current execution context. + Scope ResourceID +} + +// RangeAttributes provides attributes about a completed Range. +type RangeAttribute struct { + // Name is the human-readable name for the range. + Name string + + // Value is the value of the attribute. + Value Value +} + +// TaskID is the internal ID of a task used to disambiguate tasks (even if they +// are of the same type). +type TaskID uint64 + +const ( + // NoTask indicates the lack of a task. + NoTask = TaskID(^uint64(0)) + + // BackgroundTask is the global task that events are attached to if there was + // no other task in the context at the point the event was emitted. + BackgroundTask = TaskID(0) +) + +// Task provides details about a Task event. +type Task struct { + // ID is a unique identifier for the task. + // + // This can be used to associate the beginning of a task with its end. + ID TaskID + + // ParentID is the ID of the parent task. + Parent TaskID + + // Type is the taskType that was passed to runtime/trace.NewTask. + // + // May be "" if a task's TaskBegin event isn't present in the trace. + Type string +} + +// Region provides details about a Region event. +type Region struct { + // Task is the ID of the task this region is associated with. + Task TaskID + + // Type is the regionType that was passed to runtime/trace.StartRegion or runtime/trace.WithRegion. + Type string +} + +// Log provides details about a Log event. +type Log struct { + // Task is the ID of the task this region is associated with. + Task TaskID + + // Category is the category that was passed to runtime/trace.Log or runtime/trace.Logf. + Category string + + // Message is the message that was passed to runtime/trace.Log or runtime/trace.Logf. + Message string +} + +// Stack represents a stack. It's really a handle to a stack and it's trivially comparable. +// +// If two Stacks are equal then their Frames are guaranteed to be identical. If they are not +// equal, however, their Frames may still be equal. +type Stack struct { + table *evTable + id stackID +} + +// Frames is an iterator over the frames in a Stack. 
+func (s Stack) Frames(yield func(f StackFrame) bool) bool {
+	if s.id == 0 {
+		return true
+	}
+	stk := s.table.stacks.mustGet(s.id)
+	for _, pc := range stk.pcs {
+		f := s.table.pcs[pc]
+		sf := StackFrame{
+			PC:   f.pc,
+			Func: s.table.strings.mustGet(f.funcID),
+			File: s.table.strings.mustGet(f.fileID),
+			Line: f.line,
+		}
+		if !yield(sf) {
+			return false
+		}
+	}
+	return true
+}
+
+// NoStack is a sentinel value that can be compared against any Stack value, indicating
+// a lack of a stack trace.
+var NoStack = Stack{}
+
+// StackFrame represents a single frame of a stack.
+type StackFrame struct {
+	// PC is the program counter of the function call if this
+	// is not a leaf frame. If it's a leaf frame, it's the point
+	// at which the stack trace was taken.
+	PC uint64
+
+	// Func is the name of the function this frame maps to.
+	Func string
+
+	// File is the file which contains the source code of Func.
+	File string
+
+	// Line is the line number within File which maps to PC.
+	Line uint64
+}
+
+// ExperimentalEvent presents a raw view of an experimental event's arguments and their names.
+type ExperimentalEvent struct {
+	// Name is the name of the event.
+	Name string
+
+	// ArgNames is the names of the event's arguments in order.
+	// This may refer to a globally shared slice. Copy before mutating.
+	ArgNames []string
+
+	// Args contains the event's arguments.
+	Args []uint64
+
+	// Data is additional unparsed data that is associated with the experimental event.
+	// Data is likely to be shared across many ExperimentalEvents, so callers that parse
+	// Data are encouraged to cache the parse result and look it up by the value of Data.
+	Data *ExperimentalData
+}
+
+// ExperimentalData represents some raw and unparsed sidecar data present in the trace that is
+// associated with certain kinds of experimental events. For example, this data may contain
+// tables needed to interpret ExperimentalEvent arguments, or the ExperimentEvent could just be
+// a placeholder for a differently encoded event that's actually present in the experimental data.
+type ExperimentalData struct {
+	// Batches contain the actual experimental data, along with metadata about each batch.
+	Batches []ExperimentalBatch
+}
+
+// ExperimentalBatch represents a packet of unparsed data along with metadata about that packet.
+type ExperimentalBatch struct {
+	// Thread is the ID of the thread that produced a packet of data.
+	Thread ThreadID
+
+	// Data is a packet of unparsed data all produced by one thread.
+	Data []byte
+}
+
+// Event represents a single event in the trace.
+type Event struct {
+	table *evTable
+	ctx   schedCtx
+	base  baseEvent
+}
+
+// Kind returns the kind of event that this is.
+func (e Event) Kind() EventKind {
+	return go122Type2Kind[e.base.typ]
+}
+
+// Time returns the timestamp of the event.
+func (e Event) Time() Time {
+	return e.base.time
+}
+
+// Goroutine returns the ID of the goroutine that was executing when
+// this event happened. It describes part of the execution context
+// for this event.
+//
+// Note that for goroutine state transitions this always refers to the
+// state before the transition. For example, if a goroutine is just
+// starting to run on this thread and/or proc, then this will return
+// NoGoroutine. In this case, the goroutine starting to run can be
+// found at Event.StateTransition().Resource.
+func (e Event) Goroutine() GoID {
+	return e.ctx.G
+}
+
+// Proc returns the ID of the proc this event pertains to.
+// +// Note that for proc state transitions this always refers to the +// state before the transition. For example, if a proc is just +// starting to run on this thread, then this will return NoProc. +func (e Event) Proc() ProcID { + return e.ctx.P +} + +// Thread returns the ID of the thread this event pertains to. +// +// Note that for thread state transitions this always refers to the +// state before the transition. For example, if a thread is just +// starting to run, then this will return NoThread. +// +// Note: tracking thread state is not currently supported, so this +// will always return a valid thread ID. However thread state transitions +// may be tracked in the future, and callers must be robust to this +// possibility. +func (e Event) Thread() ThreadID { + return e.ctx.M +} + +// Stack returns a handle to a stack associated with the event. +// +// This represents a stack trace at the current moment in time for +// the current execution context. +func (e Event) Stack() Stack { + if e.base.typ == evSync { + return NoStack + } + if e.base.typ == go122.EvCPUSample { + return Stack{table: e.table, id: stackID(e.base.args[0])} + } + spec := go122.Specs()[e.base.typ] + if len(spec.StackIDs) == 0 { + return NoStack + } + // The stack for the main execution context is always the + // first stack listed in StackIDs. Subtract one from this + // because we've peeled away the timestamp argument. + id := stackID(e.base.args[spec.StackIDs[0]-1]) + if id == 0 { + return NoStack + } + return Stack{table: e.table, id: id} +} + +// Metric returns details about a Metric event. +// +// Panics if Kind != EventMetric. +func (e Event) Metric() Metric { + if e.Kind() != EventMetric { + panic("Metric called on non-Metric event") + } + var m Metric + switch e.base.typ { + case go122.EvProcsChange: + m.Name = "/sched/gomaxprocs:threads" + m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]} + case go122.EvHeapAlloc: + m.Name = "/memory/classes/heap/objects:bytes" + m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]} + case go122.EvHeapGoal: + m.Name = "/gc/heap/goal:bytes" + m.Value = Value{kind: ValueUint64, scalar: e.base.args[0]} + default: + panic(fmt.Sprintf("internal error: unexpected event type for Metric kind: %s", go122.EventString(e.base.typ))) + } + return m +} + +// Label returns details about a Label event. +// +// Panics if Kind != EventLabel. +func (e Event) Label() Label { + if e.Kind() != EventLabel { + panic("Label called on non-Label event") + } + if e.base.typ != go122.EvGoLabel { + panic(fmt.Sprintf("internal error: unexpected event type for Label kind: %s", go122.EventString(e.base.typ))) + } + return Label{ + Label: e.table.strings.mustGet(stringID(e.base.args[0])), + Resource: ResourceID{Kind: ResourceGoroutine, id: int64(e.ctx.G)}, + } +} + +// Range returns details about an EventRangeBegin, EventRangeActive, or EventRangeEnd event. +// +// Panics if Kind != EventRangeBegin, Kind != EventRangeActive, and Kind != EventRangeEnd. +func (e Event) Range() Range { + if kind := e.Kind(); kind != EventRangeBegin && kind != EventRangeActive && kind != EventRangeEnd { + panic("Range called on non-Range event") + } + var r Range + switch e.base.typ { + case go122.EvSTWBegin, go122.EvSTWEnd: + // N.B. ordering.advance smuggles in the STW reason as e.base.args[0] + // for go122.EvSTWEnd (it's already there for Begin). 
+ r.Name = "stop-the-world (" + e.table.strings.mustGet(stringID(e.base.args[0])) + ")" + r.Scope = ResourceID{Kind: ResourceGoroutine, id: int64(e.Goroutine())} + case go122.EvGCBegin, go122.EvGCActive, go122.EvGCEnd: + r.Name = "GC concurrent mark phase" + r.Scope = ResourceID{Kind: ResourceNone} + case go122.EvGCSweepBegin, go122.EvGCSweepActive, go122.EvGCSweepEnd: + r.Name = "GC incremental sweep" + r.Scope = ResourceID{Kind: ResourceProc} + if e.base.typ == go122.EvGCSweepActive { + r.Scope.id = int64(e.base.args[0]) + } else { + r.Scope.id = int64(e.Proc()) + } + r.Scope.id = int64(e.Proc()) + case go122.EvGCMarkAssistBegin, go122.EvGCMarkAssistActive, go122.EvGCMarkAssistEnd: + r.Name = "GC mark assist" + r.Scope = ResourceID{Kind: ResourceGoroutine} + if e.base.typ == go122.EvGCMarkAssistActive { + r.Scope.id = int64(e.base.args[0]) + } else { + r.Scope.id = int64(e.Goroutine()) + } + default: + panic(fmt.Sprintf("internal error: unexpected event type for Range kind: %s", go122.EventString(e.base.typ))) + } + return r +} + +// RangeAttributes returns attributes for a completed range. +// +// Panics if Kind != EventRangeEnd. +func (e Event) RangeAttributes() []RangeAttribute { + if e.Kind() != EventRangeEnd { + panic("Range called on non-Range event") + } + if e.base.typ != go122.EvGCSweepEnd { + return nil + } + return []RangeAttribute{ + { + Name: "bytes swept", + Value: Value{kind: ValueUint64, scalar: e.base.args[0]}, + }, + { + Name: "bytes reclaimed", + Value: Value{kind: ValueUint64, scalar: e.base.args[1]}, + }, + } +} + +// Task returns details about a TaskBegin or TaskEnd event. +// +// Panics if Kind != EventTaskBegin and Kind != EventTaskEnd. +func (e Event) Task() Task { + if kind := e.Kind(); kind != EventTaskBegin && kind != EventTaskEnd { + panic("Task called on non-Task event") + } + parentID := NoTask + var typ string + switch e.base.typ { + case go122.EvUserTaskBegin: + parentID = TaskID(e.base.args[1]) + typ = e.table.strings.mustGet(stringID(e.base.args[2])) + case go122.EvUserTaskEnd: + parentID = TaskID(e.base.extra(version.Go122)[0]) + typ = e.table.getExtraString(extraStringID(e.base.extra(version.Go122)[1])) + default: + panic(fmt.Sprintf("internal error: unexpected event type for Task kind: %s", go122.EventString(e.base.typ))) + } + return Task{ + ID: TaskID(e.base.args[0]), + Parent: parentID, + Type: typ, + } +} + +// Region returns details about a RegionBegin or RegionEnd event. +// +// Panics if Kind != EventRegionBegin and Kind != EventRegionEnd. +func (e Event) Region() Region { + if kind := e.Kind(); kind != EventRegionBegin && kind != EventRegionEnd { + panic("Region called on non-Region event") + } + if e.base.typ != go122.EvUserRegionBegin && e.base.typ != go122.EvUserRegionEnd { + panic(fmt.Sprintf("internal error: unexpected event type for Region kind: %s", go122.EventString(e.base.typ))) + } + return Region{ + Task: TaskID(e.base.args[0]), + Type: e.table.strings.mustGet(stringID(e.base.args[1])), + } +} + +// Log returns details about a Log event. +// +// Panics if Kind != EventLog. 
+func (e Event) Log() Log { + if e.Kind() != EventLog { + panic("Log called on non-Log event") + } + if e.base.typ != go122.EvUserLog { + panic(fmt.Sprintf("internal error: unexpected event type for Log kind: %s", go122.EventString(e.base.typ))) + } + return Log{ + Task: TaskID(e.base.args[0]), + Category: e.table.strings.mustGet(stringID(e.base.args[1])), + Message: e.table.strings.mustGet(stringID(e.base.args[2])), + } +} + +// StateTransition returns details about a StateTransition event. +// +// Panics if Kind != EventStateTransition. +func (e Event) StateTransition() StateTransition { + if e.Kind() != EventStateTransition { + panic("StateTransition called on non-StateTransition event") + } + var s StateTransition + switch e.base.typ { + case go122.EvProcStart: + s = procStateTransition(ProcID(e.base.args[0]), ProcIdle, ProcRunning) + case go122.EvProcStop: + s = procStateTransition(e.ctx.P, ProcRunning, ProcIdle) + case go122.EvProcSteal: + // N.B. ordering.advance populates e.base.extra. + beforeState := ProcRunning + if go122.ProcStatus(e.base.extra(version.Go122)[0]) == go122.ProcSyscallAbandoned { + // We've lost information because this ProcSteal advanced on a + // SyscallAbandoned state. Treat the P as idle because ProcStatus + // treats SyscallAbandoned as Idle. Otherwise we'll have an invalid + // transition. + beforeState = ProcIdle + } + s = procStateTransition(ProcID(e.base.args[0]), beforeState, ProcIdle) + case go122.EvProcStatus: + // N.B. ordering.advance populates e.base.extra. + s = procStateTransition(ProcID(e.base.args[0]), ProcState(e.base.extra(version.Go122)[0]), go122ProcStatus2ProcState[e.base.args[1]]) + case go122.EvGoCreate, go122.EvGoCreateBlocked: + status := GoRunnable + if e.base.typ == go122.EvGoCreateBlocked { + status = GoWaiting + } + s = goStateTransition(GoID(e.base.args[0]), GoNotExist, status) + s.Stack = Stack{table: e.table, id: stackID(e.base.args[1])} + case go122.EvGoCreateSyscall: + s = goStateTransition(GoID(e.base.args[0]), GoNotExist, GoSyscall) + case go122.EvGoStart: + s = goStateTransition(GoID(e.base.args[0]), GoRunnable, GoRunning) + case go122.EvGoDestroy: + s = goStateTransition(e.ctx.G, GoRunning, GoNotExist) + s.Stack = e.Stack() // This event references the resource the event happened on. + case go122.EvGoDestroySyscall: + s = goStateTransition(e.ctx.G, GoSyscall, GoNotExist) + case go122.EvGoStop: + s = goStateTransition(e.ctx.G, GoRunning, GoRunnable) + s.Reason = e.table.strings.mustGet(stringID(e.base.args[0])) + s.Stack = e.Stack() // This event references the resource the event happened on. + case go122.EvGoBlock: + s = goStateTransition(e.ctx.G, GoRunning, GoWaiting) + s.Reason = e.table.strings.mustGet(stringID(e.base.args[0])) + s.Stack = e.Stack() // This event references the resource the event happened on. + case go122.EvGoUnblock, go122.EvGoSwitch, go122.EvGoSwitchDestroy: + // N.B. GoSwitch and GoSwitchDestroy both emit additional events, but + // the first thing they both do is unblock the goroutine they name, + // identically to an unblock event (even their arguments match). + s = goStateTransition(GoID(e.base.args[0]), GoWaiting, GoRunnable) + case go122.EvGoSyscallBegin: + s = goStateTransition(e.ctx.G, GoRunning, GoSyscall) + s.Stack = e.Stack() // This event references the resource the event happened on. + case go122.EvGoSyscallEnd: + s = goStateTransition(e.ctx.G, GoSyscall, GoRunning) + s.Stack = e.Stack() // This event references the resource the event happened on. 
+ case go122.EvGoSyscallEndBlocked: + s = goStateTransition(e.ctx.G, GoSyscall, GoRunnable) + s.Stack = e.Stack() // This event references the resource the event happened on. + case go122.EvGoStatus, go122.EvGoStatusStack: + // N.B. ordering.advance populates e.base.extra. + s = goStateTransition(GoID(e.base.args[0]), GoState(e.base.extra(version.Go122)[0]), go122GoStatus2GoState[e.base.args[2]]) + default: + panic(fmt.Sprintf("internal error: unexpected event type for StateTransition kind: %s", go122.EventString(e.base.typ))) + } + return s +} + +// Experimental returns a view of the raw event for an experimental event. +// +// Panics if Kind != EventExperimental. +func (e Event) Experimental() ExperimentalEvent { + if e.Kind() != EventExperimental { + panic("Experimental called on non-Experimental event") + } + spec := go122.Specs()[e.base.typ] + argNames := spec.Args[1:] // Skip timestamp; already handled. + return ExperimentalEvent{ + Name: spec.Name, + ArgNames: argNames, + Args: e.base.args[:len(argNames)], + Data: e.table.expData[spec.Experiment], + } +} + +const evSync = ^event.Type(0) + +var go122Type2Kind = [...]EventKind{ + go122.EvCPUSample: EventStackSample, + go122.EvProcsChange: EventMetric, + go122.EvProcStart: EventStateTransition, + go122.EvProcStop: EventStateTransition, + go122.EvProcSteal: EventStateTransition, + go122.EvProcStatus: EventStateTransition, + go122.EvGoCreate: EventStateTransition, + go122.EvGoCreateSyscall: EventStateTransition, + go122.EvGoStart: EventStateTransition, + go122.EvGoDestroy: EventStateTransition, + go122.EvGoDestroySyscall: EventStateTransition, + go122.EvGoStop: EventStateTransition, + go122.EvGoBlock: EventStateTransition, + go122.EvGoUnblock: EventStateTransition, + go122.EvGoSyscallBegin: EventStateTransition, + go122.EvGoSyscallEnd: EventStateTransition, + go122.EvGoSyscallEndBlocked: EventStateTransition, + go122.EvGoStatus: EventStateTransition, + go122.EvSTWBegin: EventRangeBegin, + go122.EvSTWEnd: EventRangeEnd, + go122.EvGCActive: EventRangeActive, + go122.EvGCBegin: EventRangeBegin, + go122.EvGCEnd: EventRangeEnd, + go122.EvGCSweepActive: EventRangeActive, + go122.EvGCSweepBegin: EventRangeBegin, + go122.EvGCSweepEnd: EventRangeEnd, + go122.EvGCMarkAssistActive: EventRangeActive, + go122.EvGCMarkAssistBegin: EventRangeBegin, + go122.EvGCMarkAssistEnd: EventRangeEnd, + go122.EvHeapAlloc: EventMetric, + go122.EvHeapGoal: EventMetric, + go122.EvGoLabel: EventLabel, + go122.EvUserTaskBegin: EventTaskBegin, + go122.EvUserTaskEnd: EventTaskEnd, + go122.EvUserRegionBegin: EventRegionBegin, + go122.EvUserRegionEnd: EventRegionEnd, + go122.EvUserLog: EventLog, + go122.EvGoSwitch: EventStateTransition, + go122.EvGoSwitchDestroy: EventStateTransition, + go122.EvGoCreateBlocked: EventStateTransition, + go122.EvGoStatusStack: EventStateTransition, + go122.EvSpan: EventExperimental, + go122.EvSpanAlloc: EventExperimental, + go122.EvSpanFree: EventExperimental, + go122.EvHeapObject: EventExperimental, + go122.EvHeapObjectAlloc: EventExperimental, + go122.EvHeapObjectFree: EventExperimental, + go122.EvGoroutineStack: EventExperimental, + go122.EvGoroutineStackAlloc: EventExperimental, + go122.EvGoroutineStackFree: EventExperimental, + evSync: EventSync, +} + +var go122GoStatus2GoState = [...]GoState{ + go122.GoRunnable: GoRunnable, + go122.GoRunning: GoRunning, + go122.GoWaiting: GoWaiting, + go122.GoSyscall: GoSyscall, +} + +var go122ProcStatus2ProcState = [...]ProcState{ + go122.ProcRunning: ProcRunning, + go122.ProcIdle: ProcIdle, + 
go122.ProcSyscall: ProcRunning, + go122.ProcSyscallAbandoned: ProcIdle, +} + +// String returns the event as a human-readable string. +// +// The format of the string is intended for debugging and is subject to change. +func (e Event) String() string { + var sb strings.Builder + fmt.Fprintf(&sb, "M=%d P=%d G=%d", e.Thread(), e.Proc(), e.Goroutine()) + fmt.Fprintf(&sb, " %s Time=%d", e.Kind(), e.Time()) + // Kind-specific fields. + switch kind := e.Kind(); kind { + case EventMetric: + m := e.Metric() + fmt.Fprintf(&sb, " Name=%q Value=%s", m.Name, valueAsString(m.Value)) + case EventLabel: + l := e.Label() + fmt.Fprintf(&sb, " Label=%q Resource=%s", l.Label, l.Resource) + case EventRangeBegin, EventRangeActive, EventRangeEnd: + r := e.Range() + fmt.Fprintf(&sb, " Name=%q Scope=%s", r.Name, r.Scope) + if kind == EventRangeEnd { + fmt.Fprintf(&sb, " Attributes=[") + for i, attr := range e.RangeAttributes() { + if i != 0 { + fmt.Fprintf(&sb, " ") + } + fmt.Fprintf(&sb, "%q=%s", attr.Name, valueAsString(attr.Value)) + } + fmt.Fprintf(&sb, "]") + } + case EventTaskBegin, EventTaskEnd: + t := e.Task() + fmt.Fprintf(&sb, " ID=%d Parent=%d Type=%q", t.ID, t.Parent, t.Type) + case EventRegionBegin, EventRegionEnd: + r := e.Region() + fmt.Fprintf(&sb, " Task=%d Type=%q", r.Task, r.Type) + case EventLog: + l := e.Log() + fmt.Fprintf(&sb, " Task=%d Category=%q Message=%q", l.Task, l.Category, l.Message) + case EventStateTransition: + s := e.StateTransition() + fmt.Fprintf(&sb, " Resource=%s Reason=%q", s.Resource, s.Reason) + switch s.Resource.Kind { + case ResourceGoroutine: + id := s.Resource.Goroutine() + old, new := s.Goroutine() + fmt.Fprintf(&sb, " GoID=%d %s->%s", id, old, new) + case ResourceProc: + id := s.Resource.Proc() + old, new := s.Proc() + fmt.Fprintf(&sb, " ProcID=%d %s->%s", id, old, new) + } + if s.Stack != NoStack { + fmt.Fprintln(&sb) + fmt.Fprintln(&sb, "TransitionStack=") + s.Stack.Frames(func(f StackFrame) bool { + fmt.Fprintf(&sb, "\t%s @ 0x%x\n", f.Func, f.PC) + fmt.Fprintf(&sb, "\t\t%s:%d\n", f.File, f.Line) + return true + }) + } + case EventExperimental: + r := e.Experimental() + fmt.Fprintf(&sb, " Name=%s ArgNames=%v Args=%v", r.Name, r.ArgNames, r.Args) + } + if stk := e.Stack(); stk != NoStack { + fmt.Fprintln(&sb) + fmt.Fprintln(&sb, "Stack=") + stk.Frames(func(f StackFrame) bool { + fmt.Fprintf(&sb, "\t%s @ 0x%x\n", f.Func, f.PC) + fmt.Fprintf(&sb, "\t\t%s:%d\n", f.File, f.Line) + return true + }) + } + return sb.String() +} + +// validateTableIDs checks to make sure lookups in e.table +// will work. +func (e Event) validateTableIDs() error { + if e.base.typ == evSync { + return nil + } + spec := go122.Specs()[e.base.typ] + + // Check stacks. + for _, i := range spec.StackIDs { + id := stackID(e.base.args[i-1]) + _, ok := e.table.stacks.get(id) + if !ok { + return fmt.Errorf("found invalid stack ID %d for event %s", id, spec.Name) + } + } + // N.B. Strings referenced by stack frames are validated + // early on, when reading the stacks in to begin with. + + // Check strings. 
+	for _, i := range spec.StringIDs {
+		id := stringID(e.base.args[i-1])
+		_, ok := e.table.strings.get(id)
+		if !ok {
+			return fmt.Errorf("found invalid string ID %d for event %s", id, spec.Name)
+		}
+	}
+	return nil
+}
+
+func syncEvent(table *evTable, ts Time) Event {
+	return Event{
+		table: table,
+		ctx: schedCtx{
+			G: NoGoroutine,
+			P: NoProc,
+			M: NoThread,
+		},
+		base: baseEvent{
+			typ:  evSync,
+			time: ts,
+		},
+	}
+}
diff --git a/vendor/golang.org/x/exp/trace/flightrecorder.go b/vendor/golang.org/x/exp/trace/flightrecorder.go
new file mode 100644
index 000000000000..95ae8c627b54
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/flightrecorder.go
@@ -0,0 +1,367 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+
+package trace
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math/bits"
+	"runtime/trace"
+	"slices"
+	"sync"
+	"time"
+	_ "unsafe" // for go:linkname
+
+	"golang.org/x/exp/trace/internal/event/go122"
+)
+
+// FlightRecorder represents a flight recording configuration.
+//
+// Flight recording holds execution trace data in a circular buffer representing
+// the most recent execution data.
+//
+// Only one flight recording may be active at any given time.
+type FlightRecorder struct {
+	err error
+
+	// State specific to the recorder.
+	header [16]byte
+	active rawGeneration
+	ringMu sync.Mutex
+	ring   []rawGeneration
+
+	// Externally-set options.
+	targetSize   int
+	targetPeriod time.Duration
+
+	enabled bool       // whether the flight recorder is enabled.
+	writing sync.Mutex // protects concurrent calls to WriteTo
+
+	// The values of targetSize and targetPeriod we've committed to since the last Start.
+	wantSize int
+	wantDur  time.Duration
+}
+
+// NewFlightRecorder creates a new flight recording configuration.
+func NewFlightRecorder() *FlightRecorder {
+	return &FlightRecorder{
+		// These are just some optimistic, reasonable defaults.
+		//
+		// In reality we're also bound by whatever the runtime defaults are, because
+		// we currently have no way to change them.
+		//
+		// TODO(mknyszek): Consider adding a function that allows mutating one or
+		// both of these values' equivalents in the runtime.
+		targetSize:   10 << 20, // 10 MiB.
+		targetPeriod: 10 * time.Second,
+	}
+}
+
+// SetPeriod sets the approximate time duration that the flight recorder's circular buffer
+// represents.
+//
+// Note that SetPeriod does not make any guarantees on the amount of time the trace
+// produced by WriteTo will represent.
+// This is just a hint to the runtime to enable some control over the resulting trace.
+//
+// The initial period is implementation defined, but can be assumed to be on the order
+// of seconds.
+//
+// Adjustments to this value will not apply to an active flight recorder, and will not apply
+// if tracing is already enabled via trace.Start. All tracing must be stopped and started
+// again to change this value.
+func (r *FlightRecorder) SetPeriod(d time.Duration) {
+	r.targetPeriod = d
+}
+
+// SetSize sets the approximate size of the flight recorder's circular buffer.
+//
+// This generally takes precedence over the duration passed to SetPeriod.
+// However, it does not make any guarantees on the size of the data WriteTo will write.
+// This is just a hint to the runtime to enable some control over the memory overheads
+// of tracing.
+//
+// The initial size is implementation defined.
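+//
+// As a rough usage sketch (the values here are illustrative, not
+// recommendations), a recorder meant to retain roughly the last 30 seconds
+// or 16 MiB of execution data, whichever threshold is crossed first, would
+// be configured as:
+//
+//	fr := trace.NewFlightRecorder()
+//	fr.SetPeriod(30 * time.Second)
+//	fr.SetSize(16 << 20)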
+//
+// Adjustments to this value will not apply to an active flight recorder, and will not apply
+// if tracing is already enabled via trace.Start. All tracing must be stopped and started
+// again to change this value.
+func (r *FlightRecorder) SetSize(bytes int) {
+	r.targetSize = bytes
+}
+
+// A recorder receives bytes from the runtime tracer and processes them.
+type recorder struct {
+	r *FlightRecorder
+
+	headerReceived bool
+}
+
+func (w *recorder) Write(p []byte) (n int, err error) {
+	r := w.r
+
+	defer func() {
+		if err != nil {
+			// Propagate errors to the flight recorder.
+			if r.err == nil {
+				r.err = err
+			}
+			trace.Stop() // Stop the tracer, preventing further writes.
+		}
+	}()
+
+	rd := bytes.NewReader(p)
+
+	if !w.headerReceived {
+		if len(p) < len(r.header) {
+			return 0, fmt.Errorf("expected at least %d bytes in the first write", len(r.header))
+		}
+		rd.Read(r.header[:])
+		w.headerReceived = true
+	}
+
+	b, gen, err := readBatch(rd) // Every write from the runtime is guaranteed to be a complete batch.
+	if err == io.EOF {
+		if rd.Len() > 0 {
+			return len(p) - rd.Len(), errors.New("short read")
+		}
+		return len(p), nil
+	}
+	if err != nil {
+		return len(p) - rd.Len(), err
+	}
+
+	// Check if we're entering a new generation.
+	if r.active.gen != 0 && r.active.gen+1 == gen {
+		r.ringMu.Lock()
+
+		// Validate r.active.freq before we use it. It's required for a generation
+		// to not be considered broken, and without it, we can't correctly handle
+		// SetPeriod.
+		if r.active.freq == 0 {
+			return len(p) - rd.Len(), fmt.Errorf("broken trace: failed to find frequency event in generation %d", r.active.gen)
+		}
+
+		// Get the current trace clock time.
+		now := traceTimeNow(r.active.freq)
+
+		// Add the current generation to the ring. Make sure we always have at least one
+		// complete generation by putting the active generation onto the new list, regardless
+		// of whatever our settings are.
+		//
+		// N.B. Let's completely replace the ring here, so that WriteTo can just make a copy
+		// and not worry about aliasing. This creates allocations, but at a very low rate.
+		newRing := []rawGeneration{r.active}
+		size := r.active.size
+		for i := len(r.ring) - 1; i >= 0; i-- {
+			// Stop adding older generations if the new ring already exceeds the thresholds.
+			// This ensures we keep generations that cross a threshold, but not any that lie
+			// entirely outside it.
+			if size > r.wantSize || now.Sub(newRing[len(newRing)-1].minTraceTime()) > r.wantDur {
+				break
+			}
+			size += r.ring[i].size
+			newRing = append(newRing, r.ring[i])
+		}
+		slices.Reverse(newRing)
+		r.ring = newRing
+		r.ringMu.Unlock()
+
+		// Start a new active generation.
+		r.active = rawGeneration{}
+	}
+
+	// Obtain the frequency if this is a frequency batch.
+	if b.isFreqBatch() {
+		freq, err := parseFreq(b)
+		if err != nil {
+			return len(p) - rd.Len(), err
+		}
+		r.active.freq = freq
+	}
+
+	// Append the batch to the current generation.
+	if r.active.gen == 0 {
+		r.active.gen = gen
+	}
+	if r.active.minTime == 0 || r.active.minTime > b.time {
+		r.active.minTime = b.time
+	}
+	r.active.size += 1
+	r.active.size += uvarintSize(gen)
+	r.active.size += uvarintSize(uint64(b.m))
+	r.active.size += uvarintSize(uint64(b.time))
+	r.active.size += uvarintSize(uint64(len(b.data)))
+	r.active.size += len(b.data)
+	r.active.batches = append(r.active.batches, b)
+
+	return len(p) - rd.Len(), nil
+}
+
+// Start begins flight recording. Only one flight recorder or one call to [runtime/trace.Start]
+// may be active at any given time.
+// Returns an error if starting the flight recorder would violate this rule.
+func (r *FlightRecorder) Start() error {
+	if r.enabled {
+		return fmt.Errorf("cannot enable an enabled flight recorder")
+	}
+
+	r.wantSize = r.targetSize
+	r.wantDur = r.targetPeriod
+	r.err = nil
+
+	// Start tracing, data is sent to a recorder which forwards it to our own
+	// storage.
+	if err := trace.Start(&recorder{r: r}); err != nil {
+		return err
+	}
+
+	r.enabled = true
+	return nil
+}
+
+// Stop ends flight recording. It waits until any concurrent [FlightRecorder.WriteTo] calls exit.
+// Returns an error if the flight recorder is inactive.
+func (r *FlightRecorder) Stop() error {
+	if !r.enabled {
+		return fmt.Errorf("cannot disable a disabled flight recorder")
+	}
+	r.enabled = false
+	trace.Stop()
+
+	// Reset all state. No need to lock because the reader has already exited.
+	r.active = rawGeneration{}
+	r.ring = nil
+	return r.err
+}
+
+// Enabled returns true if the flight recorder is active. Specifically, it will return true if
+// Start did not return an error, and Stop has not yet been called.
+// It is safe to call from multiple goroutines simultaneously.
+func (r *FlightRecorder) Enabled() bool {
+	return r.enabled
+}
+
+// ErrSnapshotActive indicates that a call to WriteTo was made while one was already in progress.
+// If the caller of WriteTo sees this error, they should use the result from the other call to WriteTo.
+var ErrSnapshotActive = fmt.Errorf("call to WriteTo for trace.FlightRecorder already in progress")
+
+// WriteTo takes a snapshot of the circular buffer's contents and writes the execution data to w.
+// Returns the number of bytes written and an error.
+// An error is returned upon failure to write to w or if the flight recorder is inactive.
+// Only one goroutine may execute WriteTo at a time, but it is safe to call from multiple goroutines.
+// If a goroutine calls WriteTo while another goroutine is currently executing it, WriteTo will return
+// ErrSnapshotActive to that goroutine.
+func (r *FlightRecorder) WriteTo(w io.Writer) (total int, err error) {
+	if !r.enabled {
+		return 0, fmt.Errorf("cannot snapshot a disabled flight recorder")
+	}
+	if !r.writing.TryLock() {
+		return 0, ErrSnapshotActive
+	}
+	defer r.writing.Unlock()
+
+	// Force a global buffer flush twice.
+	//
+	// This is pretty unfortunate, but because the signal that a generation is done is that a new
+	// generation appears in the trace *or* the trace stream ends, the recorder goroutine will
+	// have no idea when to add a generation to the ring if we just flush once. If we flush twice,
+	// at least the first one will end up on the ring, which is the one we wanted anyway.
+	//
+	// In a runtime-internal implementation this is a non-issue. The runtime is fully aware
+	// of what generations are complete, so only one flush is necessary.
+	runtime_traceAdvance(false)
+	runtime_traceAdvance(false)
+
+	// Now that everything has been flushed and written, grab whatever we have.
+	//
+	// N.B. traceAdvance blocks until the tracer goroutine has actually written everything
+	// out, which means the generation we just flushed must have already been observed
+	// by the recorder goroutine. Because we flushed twice, the first flush is guaranteed to
+	// have been both completed *and* processed by the recorder goroutine.
+	r.ringMu.Lock()
+	gens := r.ring
+	r.ringMu.Unlock()
+
+	// Write the header.
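+	// (The header is the 16-byte trace magic captured verbatim from the
+	// tracer's first write, e.g. "go 1.22 trace\x00\x00\x00" for Go 1.22 traces.)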
+	total, err = w.Write(r.header[:])
+	if err != nil {
+		return total, err
+	}
+
+	// Helper for writing varints.
+	var varintBuf [binary.MaxVarintLen64]byte
+	writeUvarint := func(u uint64) error {
+		v := binary.PutUvarint(varintBuf[:], u)
+		n, err := w.Write(varintBuf[:v])
+		total += n
+		return err
+	}
+
+	// Write all the data.
+	for _, gen := range gens {
+		for _, batch := range gen.batches {
+			// Rewrite the batch header event with four arguments: gen, M ID, timestamp, and data length.
+			n, err := w.Write([]byte{byte(go122.EvEventBatch)})
+			total += n
+			if err != nil {
+				return total, err
+			}
+			if err := writeUvarint(gen.gen); err != nil {
+				return total, err
+			}
+			if err := writeUvarint(uint64(batch.m)); err != nil {
+				return total, err
+			}
+			if err := writeUvarint(uint64(batch.time)); err != nil {
+				return total, err
+			}
+			if err := writeUvarint(uint64(len(batch.data))); err != nil {
+				return total, err
+			}
+
+			// Write batch data.
+			n, err = w.Write(batch.data)
+			total += n
+			if err != nil {
+				return total, err
+			}
+		}
+	}
+	return total, nil
+}
+
+type rawGeneration struct {
+	gen     uint64
+	size    int
+	minTime timestamp
+	freq    frequency
+	batches []batch
+}
+
+func (r *rawGeneration) minTraceTime() Time {
+	return r.freq.mul(r.minTime)
+}
+
+func traceTimeNow(freq frequency) Time {
+	// TODO(mknyszek): It's unfortunate that we have to rely on runtime-internal details
+	// like this. This would be better off in the runtime.
+	return freq.mul(timestamp(runtime_traceClockNow()))
+}
+
+func uvarintSize(x uint64) int {
+	return 1 + bits.Len64(x)/7
+}
+
+//go:linkname runtime_traceAdvance runtime.traceAdvance
+func runtime_traceAdvance(stopTrace bool)
+
+//go:linkname runtime_traceClockNow runtime.traceClockNow
+func runtime_traceClockNow() int64
diff --git a/vendor/golang.org/x/exp/trace/gen.bash b/vendor/golang.org/x/exp/trace/gen.bash
new file mode 100644
index 000000000000..a34078a39b32
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/gen.bash
@@ -0,0 +1,99 @@
+#!/usr/bin/env bash
+# Copyright 2023 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This script copies this directory to golang.org/x/exp/trace.
+# Just point it at a Go commit or a local Go checkout.
+
+set -e
+
+if [ "$#" -ne 1 ]; then
+	echo 'gen.bash expects one argument: a go.googlesource.com/go commit hash to generate the package from or a path to a Go checkout'
+	exit 1
+fi
+
+# Determine the source.
+if [ -d $1 ]; then
+	echo "assuming Go checkout at $1..."
+
+	# Select the Go checkout.
+	GODIR=$1
+else
+	echo "using $1 as a commit hash..."
+
+	# Check out Go.
+	TMP=$(mktemp -d)
+	git -C $TMP clone https://go.googlesource.com/go
+	git -C $TMP/go checkout $1
+	GODIR=$TMP/go
fi
+
+# Define src and dst.
+SRC=$GODIR/src/internal/trace
+DST=$(dirname $0)
+
+# Copy.
+rsync -av --delete $SRC/ $DST
+
+# Remove the trace_test.go file and the testprogs it invokes.
+# This really tests the tracer; it's not necessary to bring it along.
+# The trace tests are also problematic because they fail to run on
+# Go versions before tip. The testprog directory is problematic because
+# of //go:build ignore, so we'd have to complicate the logic below to
+# support it.
+rm $DST/trace_test.go
+rm -r $DST/testdata/testprog
+
+# Remove the oldtrace testdata to avoid checking in new binary files.
+# Remove oldtrace_test.go and internal/oldtrace/parser_test.go because
+# they fail without this data.
+rm -r $DST/internal/oldtrace/testdata
+rm $DST/oldtrace_test.go
+rm $DST/internal/oldtrace/parser_test.go
+
+# Remove files that are only pertinent to cmd/trace.
+rm $DST/export_test.go
+rm $DST/gc*.go
+rm $DST/mud*.go
+rm $DST/summary*.go
+rm -r $DST/traceviewer
+
+# Remove mktests.go because it's a //go:build ignore file, so it would
+# complicate the logic below. This codebase isn't the source of truth
+# anyway.
+rm $DST/testdata/mktests.go
+
+# Make some packages internal.
+mv $DST/raw $DST/internal/raw
+mv $DST/event $DST/internal/event
+mv $DST/version $DST/internal/version
+mv $DST/testtrace $DST/internal/testtrace
+
+# Move the debug commands out of testdata.
+mv $DST/testdata/cmd $DST/cmd
+
+# Fix up import paths.
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's internal/trace golang.org/x/exp/trace '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's golang.org/x/exp/trace/raw golang.org/x/exp/trace/internal/raw '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's golang.org/x/exp/trace/event golang.org/x/exp/trace/internal/event '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's golang.org/x/exp/trace/event/go122 golang.org/x/exp/trace/internal/event/go122 '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's golang.org/x/exp/trace/version golang.org/x/exp/trace/internal/version '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's golang.org/x/exp/trace/testtrace golang.org/x/exp/trace/internal/testtrace '
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e 's internal/txtar golang.org/x/tools/txtar '
+
+# Add build tag for Go 1.21 and generated code comment.
+find $DST -name '*.go' | xargs -- sed -i'.tmp' -e '/LICENSE file./a \
+\
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.\
+\
+//go:build go1.21'
+
+# Format the files.
+find $DST -name '*.go' | xargs -- gofmt -w -s
+
+# Delete sed backups.
+find $DST -name '*.go.tmp' -delete
+
+# Restore known files.
+git checkout gen.bash flightrecorder.go flightrecorder_test.go
diff --git a/vendor/golang.org/x/exp/trace/generation.go b/vendor/golang.org/x/exp/trace/generation.go
new file mode 100644
index 000000000000..c4fa1f9b3a95
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/generation.go
@@ -0,0 +1,458 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+package trace
+
+import (
+	"bufio"
+	"bytes"
+	"cmp"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"slices"
+	"strings"
+
+	"golang.org/x/exp/trace/internal/event"
+	"golang.org/x/exp/trace/internal/event/go122"
+)
+
+// generation contains all the trace data for a single
+// trace generation. It is purely data: it does not
+// track any parse state nor does it contain a cursor
+// into the generation.
+type generation struct {
+	gen        uint64
+	batches    map[ThreadID][]batch
+	batchMs    []ThreadID
+	cpuSamples []cpuSample
+	*evTable
+}
+
+// spilledBatch represents a batch that was read out for the next generation,
+// while reading the previous one. It's passed on when parsing the next
+// generation.
+type spilledBatch struct {
+	gen uint64
+	*batch
+}
+
+// readGeneration buffers and decodes the structural elements of a trace generation
+// out of r. spill is the first batch of the new generation (already buffered and
+// parsed from reading the last generation). Returns the generation and the first
+// batch read of the next generation, if any.
+// +// If gen is non-nil, it is valid and must be processed before handling the returned +// error. +func readGeneration(r *bufio.Reader, spill *spilledBatch) (*generation, *spilledBatch, error) { + g := &generation{ + evTable: &evTable{ + pcs: make(map[uint64]frame), + }, + batches: make(map[ThreadID][]batch), + } + // Process the spilled batch. + if spill != nil { + g.gen = spill.gen + if err := processBatch(g, *spill.batch); err != nil { + return nil, nil, err + } + spill = nil + } + // Read batches one at a time until we either hit EOF or + // the next generation. + var spillErr error + for { + b, gen, err := readBatch(r) + if err == io.EOF { + break + } + if err != nil { + if g.gen != 0 { + // This is an error reading the first batch of the next generation. + // This is fine. Let's forge ahead assuming that what we've got so + // far is fine. + spillErr = err + break + } + return nil, nil, err + } + if gen == 0 { + // 0 is a sentinel used by the runtime, so we'll never see it. + return nil, nil, fmt.Errorf("invalid generation number %d", gen) + } + if g.gen == 0 { + // Initialize gen. + g.gen = gen + } + if gen == g.gen+1 { // TODO: advance this the same way the runtime does. + spill = &spilledBatch{gen: gen, batch: &b} + break + } + if gen != g.gen { + // N.B. Fail as fast as possible if we see this. At first it + // may seem prudent to be fault-tolerant and assume we have a + // complete generation, parsing and returning that first. However, + // if the batches are mixed across generations then it's likely + // we won't be able to parse this generation correctly at all. + // Rather than return a cryptic error in that case, indicate the + // problem as soon as we see it. + return nil, nil, fmt.Errorf("generations out of order") + } + if err := processBatch(g, b); err != nil { + return nil, nil, err + } + } + + // Check some invariants. + if g.freq == 0 { + return nil, nil, fmt.Errorf("no frequency event found") + } + // N.B. Trust that the batch order is correct. We can't validate the batch order + // by timestamp because the timestamps could just be plain wrong. The source of + // truth is the order things appear in the trace and the partial order sequence + // numbers on certain events. If it turns out the batch order is actually incorrect + // we'll very likely fail to advance a partial order from the frontier. + + // Compactify stacks and strings for better lookup performance later. + g.stacks.compactify() + g.strings.compactify() + + // Validate stacks. + if err := validateStackStrings(&g.stacks, &g.strings, g.pcs); err != nil { + return nil, nil, err + } + + // Fix up the CPU sample timestamps, now that we have freq. + for i := range g.cpuSamples { + s := &g.cpuSamples[i] + s.time = g.freq.mul(timestamp(s.time)) + } + // Sort the CPU samples. + slices.SortFunc(g.cpuSamples, func(a, b cpuSample) int { + return cmp.Compare(a.time, b.time) + }) + return g, spill, spillErr +} + +// processBatch adds the batch to the generation. 
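+// Batches are dispatched on their type: string-table, stack-table, CPU-sample,
+// and frequency batches populate the generation's lookup tables, experimental
+// batches are retained as opaque ExperimentalData, and anything else is kept
+// as an ordinary per-M event batch.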
+func processBatch(g *generation, b batch) error {
+	switch {
+	case b.isStringsBatch():
+		if err := addStrings(&g.strings, b); err != nil {
+			return err
+		}
+	case b.isStacksBatch():
+		if err := addStacks(&g.stacks, g.pcs, b); err != nil {
+			return err
+		}
+	case b.isCPUSamplesBatch():
+		samples, err := addCPUSamples(g.cpuSamples, b)
+		if err != nil {
+			return err
+		}
+		g.cpuSamples = samples
+	case b.isFreqBatch():
+		freq, err := parseFreq(b)
+		if err != nil {
+			return err
+		}
+		if g.freq != 0 {
+			return fmt.Errorf("found multiple frequency events")
+		}
+		g.freq = freq
+	case b.exp != event.NoExperiment:
+		if g.expData == nil {
+			g.expData = make(map[event.Experiment]*ExperimentalData)
+		}
+		if err := addExperimentalData(g.expData, b); err != nil {
+			return err
+		}
+	default:
+		if _, ok := g.batches[b.m]; !ok {
+			g.batchMs = append(g.batchMs, b.m)
+		}
+		g.batches[b.m] = append(g.batches[b.m], b)
+	}
+	return nil
+}
+
+// validateStackStrings makes sure all the string references in
+// the stack table are present in the string table.
+func validateStackStrings(
+	stacks *dataTable[stackID, stack],
+	strings *dataTable[stringID, string],
+	frames map[uint64]frame,
+) error {
+	var err error
+	stacks.forEach(func(id stackID, stk stack) bool {
+		for _, pc := range stk.pcs {
+			frame, ok := frames[pc]
+			if !ok {
+				err = fmt.Errorf("found unknown pc %x for stack %d", pc, id)
+				return false
+			}
+			_, ok = strings.get(frame.funcID)
+			if !ok {
+				err = fmt.Errorf("found invalid func string ID %d for stack %d", frame.funcID, id)
+				return false
+			}
+			_, ok = strings.get(frame.fileID)
+			if !ok {
+				err = fmt.Errorf("found invalid file string ID %d for stack %d", frame.fileID, id)
+				return false
+			}
+		}
+		return true
+	})
+	return err
+}
+
+// addStrings takes a batch whose first byte is an EvStrings event
+// (indicating that the batch contains only strings) and adds each
+// string contained therein to the provided strings map.
+func addStrings(stringTable *dataTable[stringID, string], b batch) error {
+	if !b.isStringsBatch() {
+		return fmt.Errorf("internal error: addStrings called on non-string batch")
+	}
+	r := bytes.NewReader(b.data)
+	hdr, err := r.ReadByte() // Consume the EvStrings byte.
+	if err != nil || event.Type(hdr) != go122.EvStrings {
+		return fmt.Errorf("missing strings batch header")
+	}
+
+	var sb strings.Builder
+	for r.Len() != 0 {
+		// Read the header.
+		ev, err := r.ReadByte()
+		if err != nil {
+			return err
+		}
+		if event.Type(ev) != go122.EvString {
+			return fmt.Errorf("expected string event, got %d", ev)
+		}
+
+		// Read the string's ID.
+		id, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+
+		// Read the string's length.
+		len, err := binary.ReadUvarint(r)
+		if err != nil {
+			return err
+		}
+		if len > go122.MaxStringSize {
+			return fmt.Errorf("invalid string size %d, maximum is %d", len, go122.MaxStringSize)
+		}
+
+		// Copy out the string.
+		n, err := io.CopyN(&sb, r, int64(len))
+		if n != int64(len) {
+			return fmt.Errorf("failed to read full string: read %d but wanted %d", n, len)
+		}
+		if err != nil {
+			return fmt.Errorf("copying string data: %w", err)
+		}
+
+		// Add the string to the map.
+		s := sb.String()
+		sb.Reset()
+		if err := stringTable.insert(stringID(id), s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// addStacks takes a batch whose first byte is an EvStacks event
+// (indicating that the batch contains only stacks) and adds each
+// stack contained therein to the provided stacks map.
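+//
+// After the EvStacks header byte, the batch is expected to be a sequence of
+// uvarint-encoded entries of the form:
+//
+//	EvStack id nframes {pc funcID fileID line} x nframes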
+func addStacks(stackTable *dataTable[stackID, stack], pcs map[uint64]frame, b batch) error { + if !b.isStacksBatch() { + return fmt.Errorf("internal error: addStacks called on non-stacks batch") + } + r := bytes.NewReader(b.data) + hdr, err := r.ReadByte() // Consume the EvStacks byte. + if err != nil || event.Type(hdr) != go122.EvStacks { + return fmt.Errorf("missing stacks batch header") + } + + for r.Len() != 0 { + // Read the header. + ev, err := r.ReadByte() + if err != nil { + return err + } + if event.Type(ev) != go122.EvStack { + return fmt.Errorf("expected stack event, got %d", ev) + } + + // Read the stack's ID. + id, err := binary.ReadUvarint(r) + if err != nil { + return err + } + + // Read how many frames are in each stack. + nFrames, err := binary.ReadUvarint(r) + if err != nil { + return err + } + if nFrames > go122.MaxFramesPerStack { + return fmt.Errorf("invalid stack size %d, maximum is %d", nFrames, go122.MaxFramesPerStack) + } + + // Each frame consists of 4 fields: pc, funcID (string), fileID (string), line. + frames := make([]uint64, 0, nFrames) + for i := uint64(0); i < nFrames; i++ { + // Read the frame data. + pc, err := binary.ReadUvarint(r) + if err != nil { + return fmt.Errorf("reading frame %d's PC for stack %d: %w", i+1, id, err) + } + funcID, err := binary.ReadUvarint(r) + if err != nil { + return fmt.Errorf("reading frame %d's funcID for stack %d: %w", i+1, id, err) + } + fileID, err := binary.ReadUvarint(r) + if err != nil { + return fmt.Errorf("reading frame %d's fileID for stack %d: %w", i+1, id, err) + } + line, err := binary.ReadUvarint(r) + if err != nil { + return fmt.Errorf("reading frame %d's line for stack %d: %w", i+1, id, err) + } + frames = append(frames, pc) + + if _, ok := pcs[pc]; !ok { + pcs[pc] = frame{ + pc: pc, + funcID: stringID(funcID), + fileID: stringID(fileID), + line: line, + } + } + } + + // Add the stack to the map. + if err := stackTable.insert(stackID(id), stack{pcs: frames}); err != nil { + return err + } + } + return nil +} + +// addCPUSamples takes a batch whose first byte is an EvCPUSamples event +// (indicating that the batch contains only CPU samples) and adds each +// sample contained therein to the provided samples list. +func addCPUSamples(samples []cpuSample, b batch) ([]cpuSample, error) { + if !b.isCPUSamplesBatch() { + return nil, fmt.Errorf("internal error: addCPUSamples called on non-CPU-sample batch") + } + r := bytes.NewReader(b.data) + hdr, err := r.ReadByte() // Consume the EvCPUSamples byte. + if err != nil || event.Type(hdr) != go122.EvCPUSamples { + return nil, fmt.Errorf("missing CPU samples batch header") + } + + for r.Len() != 0 { + // Read the header. + ev, err := r.ReadByte() + if err != nil { + return nil, err + } + if event.Type(ev) != go122.EvCPUSample { + return nil, fmt.Errorf("expected CPU sample event, got %d", ev) + } + + // Read the sample's timestamp. + ts, err := binary.ReadUvarint(r) + if err != nil { + return nil, err + } + + // Read the sample's M. + m, err := binary.ReadUvarint(r) + if err != nil { + return nil, err + } + mid := ThreadID(m) + + // Read the sample's P. + p, err := binary.ReadUvarint(r) + if err != nil { + return nil, err + } + pid := ProcID(p) + + // Read the sample's G. + g, err := binary.ReadUvarint(r) + if err != nil { + return nil, err + } + goid := GoID(g) + if g == 0 { + goid = NoGoroutine + } + + // Read the sample's stack. + s, err := binary.ReadUvarint(r) + if err != nil { + return nil, err + } + + // Add the sample to the slice. 
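+		// N.B. ts is appended as-is; readGeneration rescales all sample
+		// timestamps once the generation's frequency event has been parsed.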
+		samples = append(samples, cpuSample{
+			schedCtx: schedCtx{
+				M: mid,
+				P: pid,
+				G: goid,
+			},
+			time:  Time(ts), // N.B. this is really a "timestamp," not a Time.
+			stack: stackID(s),
+		})
+	}
+	return samples, nil
+}
+
+// parseFreq parses out a lone EvFrequency from a batch.
+func parseFreq(b batch) (frequency, error) {
+	if !b.isFreqBatch() {
+		return 0, fmt.Errorf("internal error: parseFreq called on non-frequency batch")
+	}
+	r := bytes.NewReader(b.data)
+	r.ReadByte() // Consume the EvFrequency byte.
+
+	// Read the frequency. It'll come out as timestamp units per second.
+	f, err := binary.ReadUvarint(r)
+	if err != nil {
+		return 0, err
+	}
+	// Convert to nanoseconds per timestamp unit.
+	return frequency(1.0 / (float64(f) / 1e9)), nil
+}
+
+// addExperimentalData takes an experimental batch and adds it to the ExperimentalData
+// for the experiment it's a part of.
+func addExperimentalData(expData map[event.Experiment]*ExperimentalData, b batch) error {
+	if b.exp == event.NoExperiment {
+		return fmt.Errorf("internal error: addExperimentalData called on non-experimental batch")
+	}
+	ed, ok := expData[b.exp]
+	if !ok {
+		ed = new(ExperimentalData)
+		expData[b.exp] = ed
+	}
+	ed.Batches = append(ed.Batches, ExperimentalBatch{
+		Thread: b.m,
+		Data:   b.data,
+	})
+	return nil
+}
diff --git a/vendor/golang.org/x/exp/trace/internal/event/event.go b/vendor/golang.org/x/exp/trace/internal/event/event.go
new file mode 100644
index 000000000000..cfabfa6cbfaf
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/internal/event/event.go
@@ -0,0 +1,106 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+package event
+
+// Type is the common in-memory representation of the low-level event type.
+type Type uint8
+
+// Spec is a specification for a trace event. It contains sufficient information
+// to perform basic parsing of any trace event for any version of Go.
+type Spec struct {
+	// Name is the human-readable name of the trace event.
+	Name string
+
+	// Args contains the names of each trace event's argument.
+	// Its length determines the number of arguments an event has.
+	//
+	// Argument names follow a certain structure and this structure
+	// is relied on by the testing framework to type-check arguments.
+	// The structure is:
+	//
+	//	(?P<name>[A-Za-z]+_)?(?P<type>[A-Za-z]+)
+	//
+	// In sum, it's an optional name followed by a type. If the name
+	// is present, it is separated from the type with an underscore.
+	// The valid argument types and the Go types they map to are listed
+	// in the ArgTypes variable.
+	Args []string
+
+	// StringIDs indicates which of the arguments are string IDs.
+	StringIDs []int
+
+	// StackIDs indicates which of the arguments are stack IDs.
+	//
+	// The list is not sorted. The first index always refers to
+	// the main stack for the current execution context of the event.
+	StackIDs []int
+
+	// StartEv indicates the event type of the corresponding "start"
+	// event, if this event is an "end," for a pair of events that
+	// represent a time range.
+	StartEv Type
+
+	// IsTimedEvent indicates whether this is an event that both
+	// appears in the main event stream and is surfaced to the
+	// trace reader.
+ // + // Events that are not "timed" are considered "structural" + // since they either need significant reinterpretation or + // otherwise aren't actually surfaced by the trace reader. + IsTimedEvent bool + + // HasData is true if the event has trailer consisting of a + // varint length followed by unencoded bytes of some data. + // + // An event may not be both a timed event and have data. + HasData bool + + // IsStack indicates that the event represents a complete + // stack trace. Specifically, it means that after the arguments + // there's a varint length, followed by 4*length varints. Each + // group of 4 represents the PC, file ID, func ID, and line number + // in that order. + IsStack bool + + // Experiment indicates the ID of an experiment this event is associated + // with. If Experiment is not NoExperiment, then the event is experimental + // and will be exposed as an EventExperiment. + Experiment Experiment +} + +// ArgTypes is a list of valid argument types for use in Args. +// +// See the documentation of Args for more details. +var ArgTypes = [...]string{ + "seq", // sequence number + "pstatus", // P status + "gstatus", // G status + "g", // trace.GoID + "m", // trace.ThreadID + "p", // trace.ProcID + "string", // string ID + "stack", // stack ID + "value", // uint64 + "task", // trace.TaskID +} + +// Names is a helper that produces a mapping of event names to event types. +func Names(specs []Spec) map[string]Type { + nameToType := make(map[string]Type) + for i, spec := range specs { + nameToType[spec.Name] = Type(byte(i)) + } + return nameToType +} + +// Experiment is an experiment ID that events may be associated with. +type Experiment uint + +// NoExperiment is the reserved ID 0 indicating no experiment. +const NoExperiment Experiment = 0 diff --git a/vendor/golang.org/x/exp/trace/internal/event/go122/event.go b/vendor/golang.org/x/exp/trace/internal/event/go122/event.go new file mode 100644 index 000000000000..5a8761fcbf55 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/internal/event/go122/event.go @@ -0,0 +1,515 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package go122 + +import ( + "fmt" + "golang.org/x/exp/trace/internal/event" +) + +const ( + EvNone event.Type = iota // unused + + // Structural events. + EvEventBatch // start of per-M batch of events [generation, M ID, timestamp, batch length] + EvStacks // start of a section of the stack table [...EvStack] + EvStack // stack table entry [ID, ...{PC, func string ID, file string ID, line #}] + EvStrings // start of a section of the string dictionary [...EvString] + EvString // string dictionary entry [ID, length, string] + EvCPUSamples // start of a section of CPU samples [...EvCPUSample] + EvCPUSample // CPU profiling sample [timestamp, M ID, P ID, goroutine ID, stack ID] + EvFrequency // timestamp units per sec [freq] + + // Procs. + EvProcsChange // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack ID] + EvProcStart // start of P [timestamp, P ID, P seq] + EvProcStop // stop of P [timestamp] + EvProcSteal // P was stolen [timestamp, P ID, P seq, M ID] + EvProcStatus // P status at the start of a generation [timestamp, P ID, status] + + // Goroutines. 
+ EvGoCreate // goroutine creation [timestamp, new goroutine ID, new stack ID, stack ID] + EvGoCreateSyscall // goroutine appears in syscall (cgo callback) [timestamp, new goroutine ID] + EvGoStart // goroutine starts running [timestamp, goroutine ID, goroutine seq] + EvGoDestroy // goroutine ends [timestamp] + EvGoDestroySyscall // goroutine ends in syscall (cgo callback) [timestamp] + EvGoStop // goroutine yields its time, but is runnable [timestamp, reason, stack ID] + EvGoBlock // goroutine blocks [timestamp, reason, stack ID] + EvGoUnblock // goroutine is unblocked [timestamp, goroutine ID, goroutine seq, stack ID] + EvGoSyscallBegin // syscall enter [timestamp, P seq, stack ID] + EvGoSyscallEnd // syscall exit [timestamp] + EvGoSyscallEndBlocked // syscall exit and it blocked at some point [timestamp] + EvGoStatus // goroutine status at the start of a generation [timestamp, goroutine ID, thread ID, status] + + // STW. + EvSTWBegin // STW start [timestamp, kind] + EvSTWEnd // STW done [timestamp] + + // GC events. + EvGCActive // GC active [timestamp, seq] + EvGCBegin // GC start [timestamp, seq, stack ID] + EvGCEnd // GC done [timestamp, seq] + EvGCSweepActive // GC sweep active [timestamp, P ID] + EvGCSweepBegin // GC sweep start [timestamp, stack ID] + EvGCSweepEnd // GC sweep done [timestamp, swept bytes, reclaimed bytes] + EvGCMarkAssistActive // GC mark assist active [timestamp, goroutine ID] + EvGCMarkAssistBegin // GC mark assist start [timestamp, stack ID] + EvGCMarkAssistEnd // GC mark assist done [timestamp] + EvHeapAlloc // gcController.heapLive change [timestamp, heap alloc in bytes] + EvHeapGoal // gcController.heapGoal() change [timestamp, heap goal in bytes] + + // Annotations. + EvGoLabel // apply string label to current running goroutine [timestamp, label string ID] + EvUserTaskBegin // trace.NewTask [timestamp, internal task ID, internal parent task ID, name string ID, stack ID] + EvUserTaskEnd // end of a task [timestamp, internal task ID, stack ID] + EvUserRegionBegin // trace.{Start,With}Region [timestamp, internal task ID, name string ID, stack ID] + EvUserRegionEnd // trace.{End,With}Region [timestamp, internal task ID, name string ID, stack ID] + EvUserLog // trace.Log [timestamp, internal task ID, key string ID, value string ID, stack] + + // Coroutines. Added in Go 1.23. + EvGoSwitch // goroutine switch (coroswitch) [timestamp, goroutine ID, goroutine seq] + EvGoSwitchDestroy // goroutine switch and destroy [timestamp, goroutine ID, goroutine seq] + EvGoCreateBlocked // goroutine creation (starts blocked) [timestamp, new goroutine ID, new stack ID, stack ID] + + // GoStatus with stack. Added in Go 1.23. + EvGoStatusStack // goroutine status at the start of a generation, with a stack [timestamp, goroutine ID, M ID, status, stack ID] + + // Batch event for an experimental batch with a custom format. Added in Go 1.23. + EvExperimentalBatch // start of extra data [experiment ID, generation, M ID, timestamp, batch length, batch data...] +) + +// Experiments. +const ( + // AllocFree is the alloc-free events experiment. + AllocFree event.Experiment = 1 + iota +) + +// Experimental events. +const ( + _ event.Type = 127 + iota + + // Experimental events for AllocFree. + + // Experimental heap span events. Added in Go 1.23. + EvSpan // heap span exists [timestamp, id, npages, type/class] + EvSpanAlloc // heap span alloc [timestamp, id, npages, type/class] + EvSpanFree // heap span free [timestamp, id] + + // Experimental heap object events. Added in Go 1.23. 
+ EvHeapObject // heap object exists [timestamp, id, type] + EvHeapObjectAlloc // heap object alloc [timestamp, id, type] + EvHeapObjectFree // heap object free [timestamp, id] + + // Experimental goroutine stack events. Added in Go 1.23. + EvGoroutineStack // stack exists [timestamp, id, order] + EvGoroutineStackAlloc // stack alloc [timestamp, id, order] + EvGoroutineStackFree // stack free [timestamp, id] +) + +// EventString returns the name of a Go 1.22 event. +func EventString(typ event.Type) string { + if int(typ) < len(specs) { + return specs[typ].Name + } + return fmt.Sprintf("Invalid(%d)", typ) +} + +func Specs() []event.Spec { + return specs[:] +} + +var specs = [...]event.Spec{ + // "Structural" Events. + EvEventBatch: { + Name: "EventBatch", + Args: []string{"gen", "m", "time", "size"}, + }, + EvStacks: { + Name: "Stacks", + }, + EvStack: { + Name: "Stack", + Args: []string{"id", "nframes"}, + IsStack: true, + }, + EvStrings: { + Name: "Strings", + }, + EvString: { + Name: "String", + Args: []string{"id"}, + HasData: true, + }, + EvCPUSamples: { + Name: "CPUSamples", + }, + EvCPUSample: { + Name: "CPUSample", + Args: []string{"time", "m", "p", "g", "stack"}, + // N.B. There's clearly a timestamp here, but these Events + // are special in that they don't appear in the regular + // M streams. + }, + EvFrequency: { + Name: "Frequency", + Args: []string{"freq"}, + }, + EvExperimentalBatch: { + Name: "ExperimentalBatch", + Args: []string{"exp", "gen", "m", "time"}, + HasData: true, // Easier to represent for raw readers. + }, + + // "Timed" Events. + EvProcsChange: { + Name: "ProcsChange", + Args: []string{"dt", "procs_value", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + }, + EvProcStart: { + Name: "ProcStart", + Args: []string{"dt", "p", "p_seq"}, + IsTimedEvent: true, + }, + EvProcStop: { + Name: "ProcStop", + Args: []string{"dt"}, + IsTimedEvent: true, + }, + EvProcSteal: { + Name: "ProcSteal", + Args: []string{"dt", "p", "p_seq", "m"}, + IsTimedEvent: true, + }, + EvProcStatus: { + Name: "ProcStatus", + Args: []string{"dt", "p", "pstatus"}, + IsTimedEvent: true, + }, + EvGoCreate: { + Name: "GoCreate", + Args: []string{"dt", "new_g", "new_stack", "stack"}, + IsTimedEvent: true, + StackIDs: []int{3, 2}, + }, + EvGoCreateSyscall: { + Name: "GoCreateSyscall", + Args: []string{"dt", "new_g"}, + IsTimedEvent: true, + }, + EvGoStart: { + Name: "GoStart", + Args: []string{"dt", "g", "g_seq"}, + IsTimedEvent: true, + }, + EvGoDestroy: { + Name: "GoDestroy", + Args: []string{"dt"}, + IsTimedEvent: true, + }, + EvGoDestroySyscall: { + Name: "GoDestroySyscall", + Args: []string{"dt"}, + IsTimedEvent: true, + }, + EvGoStop: { + Name: "GoStop", + Args: []string{"dt", "reason_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + StringIDs: []int{1}, + }, + EvGoBlock: { + Name: "GoBlock", + Args: []string{"dt", "reason_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + StringIDs: []int{1}, + }, + EvGoUnblock: { + Name: "GoUnblock", + Args: []string{"dt", "g", "g_seq", "stack"}, + IsTimedEvent: true, + StackIDs: []int{3}, + }, + EvGoSyscallBegin: { + Name: "GoSyscallBegin", + Args: []string{"dt", "p_seq", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + }, + EvGoSyscallEnd: { + Name: "GoSyscallEnd", + Args: []string{"dt"}, + StartEv: EvGoSyscallBegin, + IsTimedEvent: true, + }, + EvGoSyscallEndBlocked: { + Name: "GoSyscallEndBlocked", + Args: []string{"dt"}, + StartEv: EvGoSyscallBegin, + IsTimedEvent: true, + }, + EvGoStatus: { + Name: "GoStatus", + 
Args: []string{"dt", "g", "m", "gstatus"}, + IsTimedEvent: true, + }, + EvSTWBegin: { + Name: "STWBegin", + Args: []string{"dt", "kind_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + StringIDs: []int{1}, + }, + EvSTWEnd: { + Name: "STWEnd", + Args: []string{"dt"}, + StartEv: EvSTWBegin, + IsTimedEvent: true, + }, + EvGCActive: { + Name: "GCActive", + Args: []string{"dt", "gc_seq"}, + IsTimedEvent: true, + StartEv: EvGCBegin, + }, + EvGCBegin: { + Name: "GCBegin", + Args: []string{"dt", "gc_seq", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + }, + EvGCEnd: { + Name: "GCEnd", + Args: []string{"dt", "gc_seq"}, + StartEv: EvGCBegin, + IsTimedEvent: true, + }, + EvGCSweepActive: { + Name: "GCSweepActive", + Args: []string{"dt", "p"}, + StartEv: EvGCSweepBegin, + IsTimedEvent: true, + }, + EvGCSweepBegin: { + Name: "GCSweepBegin", + Args: []string{"dt", "stack"}, + IsTimedEvent: true, + StackIDs: []int{1}, + }, + EvGCSweepEnd: { + Name: "GCSweepEnd", + Args: []string{"dt", "swept_value", "reclaimed_value"}, + StartEv: EvGCSweepBegin, + IsTimedEvent: true, + }, + EvGCMarkAssistActive: { + Name: "GCMarkAssistActive", + Args: []string{"dt", "g"}, + StartEv: EvGCMarkAssistBegin, + IsTimedEvent: true, + }, + EvGCMarkAssistBegin: { + Name: "GCMarkAssistBegin", + Args: []string{"dt", "stack"}, + IsTimedEvent: true, + StackIDs: []int{1}, + }, + EvGCMarkAssistEnd: { + Name: "GCMarkAssistEnd", + Args: []string{"dt"}, + StartEv: EvGCMarkAssistBegin, + IsTimedEvent: true, + }, + EvHeapAlloc: { + Name: "HeapAlloc", + Args: []string{"dt", "heapalloc_value"}, + IsTimedEvent: true, + }, + EvHeapGoal: { + Name: "HeapGoal", + Args: []string{"dt", "heapgoal_value"}, + IsTimedEvent: true, + }, + EvGoLabel: { + Name: "GoLabel", + Args: []string{"dt", "label_string"}, + IsTimedEvent: true, + StringIDs: []int{1}, + }, + EvUserTaskBegin: { + Name: "UserTaskBegin", + Args: []string{"dt", "task", "parent_task", "name_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{4}, + StringIDs: []int{3}, + }, + EvUserTaskEnd: { + Name: "UserTaskEnd", + Args: []string{"dt", "task", "stack"}, + IsTimedEvent: true, + StackIDs: []int{2}, + }, + EvUserRegionBegin: { + Name: "UserRegionBegin", + Args: []string{"dt", "task", "name_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{3}, + StringIDs: []int{2}, + }, + EvUserRegionEnd: { + Name: "UserRegionEnd", + Args: []string{"dt", "task", "name_string", "stack"}, + StartEv: EvUserRegionBegin, + IsTimedEvent: true, + StackIDs: []int{3}, + StringIDs: []int{2}, + }, + EvUserLog: { + Name: "UserLog", + Args: []string{"dt", "task", "key_string", "value_string", "stack"}, + IsTimedEvent: true, + StackIDs: []int{4}, + StringIDs: []int{2, 3}, + }, + EvGoSwitch: { + Name: "GoSwitch", + Args: []string{"dt", "g", "g_seq"}, + IsTimedEvent: true, + }, + EvGoSwitchDestroy: { + Name: "GoSwitchDestroy", + Args: []string{"dt", "g", "g_seq"}, + IsTimedEvent: true, + }, + EvGoCreateBlocked: { + Name: "GoCreateBlocked", + Args: []string{"dt", "new_g", "new_stack", "stack"}, + IsTimedEvent: true, + StackIDs: []int{3, 2}, + }, + EvGoStatusStack: { + Name: "GoStatusStack", + Args: []string{"dt", "g", "m", "gstatus", "stack"}, + IsTimedEvent: true, + StackIDs: []int{4}, + }, + + // Experimental events. 
+ + EvSpan: { + Name: "Span", + Args: []string{"dt", "id", "npages_value", "kindclass"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvSpanAlloc: { + Name: "SpanAlloc", + Args: []string{"dt", "id", "npages_value", "kindclass"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvSpanFree: { + Name: "SpanFree", + Args: []string{"dt", "id"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvHeapObject: { + Name: "HeapObject", + Args: []string{"dt", "id", "type"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvHeapObjectAlloc: { + Name: "HeapObjectAlloc", + Args: []string{"dt", "id", "type"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvHeapObjectFree: { + Name: "HeapObjectFree", + Args: []string{"dt", "id"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvGoroutineStack: { + Name: "GoroutineStack", + Args: []string{"dt", "id", "order"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvGoroutineStackAlloc: { + Name: "GoroutineStackAlloc", + Args: []string{"dt", "id", "order"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, + EvGoroutineStackFree: { + Name: "GoroutineStackFree", + Args: []string{"dt", "id"}, + IsTimedEvent: true, + Experiment: AllocFree, + }, +} + +type GoStatus uint8 + +const ( + GoBad GoStatus = iota + GoRunnable + GoRunning + GoSyscall + GoWaiting +) + +func (s GoStatus) String() string { + switch s { + case GoRunnable: + return "Runnable" + case GoRunning: + return "Running" + case GoSyscall: + return "Syscall" + case GoWaiting: + return "Waiting" + } + return "Bad" +} + +type ProcStatus uint8 + +const ( + ProcBad ProcStatus = iota + ProcRunning + ProcIdle + ProcSyscall + ProcSyscallAbandoned +) + +func (s ProcStatus) String() string { + switch s { + case ProcRunning: + return "Running" + case ProcIdle: + return "Idle" + case ProcSyscall: + return "Syscall" + } + return "Bad" +} + +const ( + // Various format-specific constants. + MaxBatchSize = 64 << 10 + MaxFramesPerStack = 128 + MaxStringSize = 1 << 10 +) diff --git a/vendor/golang.org/x/exp/trace/internal/event/requirements.go b/vendor/golang.org/x/exp/trace/internal/event/requirements.go new file mode 100644 index 000000000000..3482d4e5af8e --- /dev/null +++ b/vendor/golang.org/x/exp/trace/internal/event/requirements.go @@ -0,0 +1,30 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package event + +// SchedReqs is a set of constraints on what the scheduling +// context must look like. +type SchedReqs struct { + Thread Constraint + Proc Constraint + Goroutine Constraint +} + +// Constraint represents a various presence requirements. +type Constraint uint8 + +const ( + MustNotHave Constraint = iota + MayHave + MustHave +) + +// UserGoReqs is a common requirement among events that are running +// or are close to running user code. +var UserGoReqs = SchedReqs{Thread: MustHave, Proc: MustHave, Goroutine: MustHave} diff --git a/vendor/golang.org/x/exp/trace/internal/oldtrace/order.go b/vendor/golang.org/x/exp/trace/internal/oldtrace/order.go new file mode 100644 index 000000000000..d2945461a1a4 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/internal/oldtrace/order.go @@ -0,0 +1,176 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package oldtrace + +import "errors" + +type orderEvent struct { + ev Event + proc *proc +} + +type gStatus int + +type gState struct { + seq uint64 + status gStatus +} + +const ( + gDead gStatus = iota + gRunnable + gRunning + gWaiting + + unordered = ^uint64(0) + garbage = ^uint64(0) - 1 + noseq = ^uint64(0) + seqinc = ^uint64(0) - 1 +) + +// stateTransition returns goroutine state (sequence and status) when the event +// becomes ready for merging (init) and the goroutine state after the event (next). +func stateTransition(ev *Event) (g uint64, init, next gState) { + // Note that we have an explicit return in each case, as that produces slightly better code (tested on Go 1.19). + + switch ev.Type { + case EvGoCreate: + g = ev.Args[0] + init = gState{0, gDead} + next = gState{1, gRunnable} + return + case EvGoWaiting, EvGoInSyscall: + g = ev.G + init = gState{1, gRunnable} + next = gState{2, gWaiting} + return + case EvGoStart, EvGoStartLabel: + g = ev.G + init = gState{ev.Args[1], gRunnable} + next = gState{ev.Args[1] + 1, gRunning} + return + case EvGoStartLocal: + // noseq means that this event is ready for merging as soon as + // frontier reaches it (EvGoStartLocal is emitted on the same P + // as the corresponding EvGoCreate/EvGoUnblock, and thus the latter + // is already merged). + // seqinc is a stub for cases when event increments g sequence, + // but since we don't know current seq we also don't know next seq. + g = ev.G + init = gState{noseq, gRunnable} + next = gState{seqinc, gRunning} + return + case EvGoBlock, EvGoBlockSend, EvGoBlockRecv, EvGoBlockSelect, + EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoSleep, + EvGoSysBlock, EvGoBlockGC: + g = ev.G + init = gState{noseq, gRunning} + next = gState{noseq, gWaiting} + return + case EvGoSched, EvGoPreempt: + g = ev.G + init = gState{noseq, gRunning} + next = gState{noseq, gRunnable} + return + case EvGoUnblock, EvGoSysExit: + g = ev.Args[0] + init = gState{ev.Args[1], gWaiting} + next = gState{ev.Args[1] + 1, gRunnable} + return + case EvGoUnblockLocal, EvGoSysExitLocal: + g = ev.Args[0] + init = gState{noseq, gWaiting} + next = gState{seqinc, gRunnable} + return + case EvGCStart: + g = garbage + init = gState{ev.Args[0], gDead} + next = gState{ev.Args[0] + 1, gDead} + return + default: + // no ordering requirements + g = unordered + return + } +} + +func transitionReady(g uint64, curr, init gState) bool { + return g == unordered || (init.seq == noseq || init.seq == curr.seq) && init.status == curr.status +} + +func transition(gs map[uint64]gState, g uint64, init, next gState) error { + if g == unordered { + return nil + } + curr := gs[g] + if !transitionReady(g, curr, init) { + // See comment near the call to transition, where we're building the frontier, for details on how this could + // possibly happen. 
+ return errors.New("encountered impossible goroutine state transition") + } + switch next.seq { + case noseq: + next.seq = curr.seq + case seqinc: + next.seq = curr.seq + 1 + } + gs[g] = next + return nil +} + +type orderEventList []orderEvent + +func (l *orderEventList) Less(i, j int) bool { + return (*l)[i].ev.Ts < (*l)[j].ev.Ts +} + +func (h *orderEventList) Push(x orderEvent) { + *h = append(*h, x) + heapUp(h, len(*h)-1) +} + +func (h *orderEventList) Pop() orderEvent { + n := len(*h) - 1 + (*h)[0], (*h)[n] = (*h)[n], (*h)[0] + heapDown(h, 0, n) + x := (*h)[len(*h)-1] + *h = (*h)[:len(*h)-1] + return x +} + +func heapUp(h *orderEventList, j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !h.Less(j, i) { + break + } + (*h)[i], (*h)[j] = (*h)[j], (*h)[i] + j = i + } +} + +func heapDown(h *orderEventList, i0, n int) bool { + i := i0 + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && h.Less(j2, j1) { + j = j2 // = 2*i + 2 // right child + } + if !h.Less(j, i) { + break + } + (*h)[i], (*h)[j] = (*h)[j], (*h)[i] + i = j + } + return i > i0 +} diff --git a/vendor/golang.org/x/exp/trace/internal/oldtrace/parser.go b/vendor/golang.org/x/exp/trace/internal/oldtrace/parser.go new file mode 100644 index 000000000000..4e673bd48a91 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/internal/oldtrace/parser.go @@ -0,0 +1,1548 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +// Package oldtrace implements a parser for Go execution traces from versions +// 1.11–1.21. +// +// The package started as a copy of Go 1.19's golang.org/x/exp/trace, but has been +// optimized to be faster while using less memory and fewer allocations. It has +// been further modified for the specific purpose of converting traces to the +// new 1.22+ format. +package oldtrace + +import ( + "bytes" + "cmp" + "encoding/binary" + "errors" + "fmt" + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/version" + "io" + "math" + "slices" + "sort" +) + +// Timestamp represents a count of nanoseconds since the beginning of the trace. +// They can only be meaningfully compared with other timestamps from the same +// trace. +type Timestamp int64 + +// Event describes one event in the trace. +type Event struct { + // The Event type is carefully laid out to optimize its size and to avoid + // pointers, the latter so that the garbage collector won't have to scan any + // memory of our millions of events. + + Ts Timestamp // timestamp in nanoseconds + G uint64 // G on which the event happened + Args [4]uint64 // event-type-specific arguments + StkID uint32 // unique stack ID + P int32 // P on which the event happened (can be a real P or one of TimerP, NetpollP, SyscallP) + Type event.Type // one of Ev* +} + +// Frame is a frame in stack traces. +type Frame struct { + PC uint64 + // string ID of the function name + Fn uint64 + // string ID of the file name + File uint64 + Line int +} + +const ( + // Special P identifiers: + FakeP = 1000000 + iota + TimerP // contains timer unblocks + NetpollP // contains network unblocks + SyscallP // contains returns from syscalls + GCP // contains GC state + ProfileP // contains recording of CPU profile samples +) + +// Trace is the result of Parse. 
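+//
+// A minimal decoding sketch (hypothetical caller; version.ReadHeader is the
+// helper this package expects callers to use, per Parse's documentation):
+//
+//	ver, err := version.ReadHeader(r)
+//	if err != nil {
+//		return err
+//	}
+//	tr, err := oldtrace.Parse(r, ver)
+//	if err != nil {
+//		return err
+//	}
+//	for i := 0; i < tr.Events.Len(); i++ {
+//		ev := tr.Events.Ptr(i)
+//		_ = ev // inspect ev.Type, ev.Ts, ev.G, ...
+//	}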
+type Trace struct { + Version version.Version + + // Events is the sorted list of Events in the trace. + Events Events + // Stacks is the stack traces (stored as slices of PCs), keyed by stack IDs + // from the trace. + Stacks map[uint32][]uint64 + PCs map[uint64]Frame + Strings map[uint64]string + InlineStrings []string +} + +// batchOffset records the byte offset of, and number of events in, a batch. A +// batch is a sequence of events emitted by a P. Events within a single batch +// are sorted by time. +type batchOffset struct { + offset int + numEvents int +} + +type parser struct { + ver version.Version + data []byte + off int + + strings map[uint64]string + inlineStrings []string + inlineStringsMapping map[string]int + // map from Ps to their batch offsets + batchOffsets map[int32][]batchOffset + stacks map[uint32][]uint64 + stacksData []uint64 + ticksPerSec int64 + pcs map[uint64]Frame + cpuSamples []Event + timerGoids map[uint64]bool + + // state for readRawEvent + args []uint64 + + // state for parseEvent + lastTs Timestamp + lastG uint64 + // map from Ps to the last Gs that ran on them + lastGs map[int32]uint64 + lastP int32 +} + +func (p *parser) discard(n uint64) bool { + if n > math.MaxInt { + return false + } + if noff := p.off + int(n); noff < p.off || noff > len(p.data) { + return false + } else { + p.off = noff + } + return true +} + +func newParser(r io.Reader, ver version.Version) (*parser, error) { + var buf []byte + if seeker, ok := r.(io.Seeker); ok { + // Determine the size of the reader so that we can allocate a buffer + // without having to grow it later. + cur, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + end, err := seeker.Seek(0, io.SeekEnd) + if err != nil { + return nil, err + } + _, err = seeker.Seek(cur, io.SeekStart) + if err != nil { + return nil, err + } + + buf = make([]byte, end-cur) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + } else { + var err error + buf, err = io.ReadAll(r) + if err != nil { + return nil, err + } + } + return &parser{data: buf, ver: ver, timerGoids: make(map[uint64]bool)}, nil +} + +// Parse parses Go execution traces from versions 1.11–1.21. The provided reader +// will be read to completion and the entire trace will be materialized in +// memory. That is, this function does not allow incremental parsing. +// +// The reader has to be positioned just after the trace header and vers needs to +// be the version of the trace. This can be achieved by using +// version.ReadHeader. +func Parse(r io.Reader, vers version.Version) (Trace, error) { + // We accept the version as an argument because golang.org/x/exp/trace will have + // already read the version to determine which parser to use. + p, err := newParser(r, vers) + if err != nil { + return Trace{}, err + } + return p.parse() +} + +// parse parses, post-processes and verifies the trace. +func (p *parser) parse() (Trace, error) { + defer func() { + p.data = nil + }() + + // We parse a trace by running the following steps in order: + // + // 1. In the initial pass we collect information about batches (their + // locations and sizes.) We also parse CPU profiling samples in this + // step, simply to reduce the number of full passes that we need. + // + // 2. In the second pass we parse batches and merge them into a globally + // ordered event stream. This uses the batch information from the first + // pass to quickly find batches. + // + // 3. 
After all events have been parsed we convert their timestamps from CPU + // ticks to wall time. Furthermore we move timers and syscalls to + // dedicated, fake Ps. + // + // 4. Finally, we validate the trace. + + p.strings = make(map[uint64]string) + p.batchOffsets = make(map[int32][]batchOffset) + p.lastGs = make(map[int32]uint64) + p.stacks = make(map[uint32][]uint64) + p.pcs = make(map[uint64]Frame) + p.inlineStringsMapping = make(map[string]int) + + if err := p.collectBatchesAndCPUSamples(); err != nil { + return Trace{}, err + } + + events, err := p.parseEventBatches() + if err != nil { + return Trace{}, err + } + + if p.ticksPerSec == 0 { + return Trace{}, errors.New("no EvFrequency event") + } + + if events.Len() > 0 { + // Translate cpu ticks to real time. + minTs := events.Ptr(0).Ts + // Use floating point to avoid integer overflows. + freq := 1e9 / float64(p.ticksPerSec) + for i := 0; i < events.Len(); i++ { + ev := events.Ptr(i) + ev.Ts = Timestamp(float64(ev.Ts-minTs) * freq) + // Move timers and syscalls to separate fake Ps. + if p.timerGoids[ev.G] && ev.Type == EvGoUnblock { + ev.P = TimerP + } + if ev.Type == EvGoSysExit { + ev.P = SyscallP + } + } + } + + if err := p.postProcessTrace(events); err != nil { + return Trace{}, err + } + + res := Trace{ + Version: p.ver, + Events: events, + Stacks: p.stacks, + Strings: p.strings, + InlineStrings: p.inlineStrings, + PCs: p.pcs, + } + return res, nil +} + +// rawEvent is a helper type used during parsing. +type rawEvent struct { + typ event.Type + args []uint64 + sargs []string + + // if typ == EvBatch, these fields describe the batch. + batchPid int32 + batchOffset int +} + +type proc struct { + pid int32 + // the remaining events in the current batch + events []Event + // buffer for reading batches into, aliased by proc.events + buf []Event + + // there are no more batches left + done bool +} + +const eventsBucketSize = 524288 // 32 MiB of events + +type Events struct { + // Events is a slice of slices that grows one slice of size eventsBucketSize + // at a time. This avoids the O(n) cost of slice growth in append, and + // additionally allows consumers to drop references to parts of the data, + // freeing memory piecewise. + n int + buckets []*[eventsBucketSize]Event + off int +} + +// grow grows the slice by one and returns a pointer to the new element, without +// overwriting it. +func (l *Events) grow() *Event { + a, b := l.index(l.n) + if a >= len(l.buckets) { + l.buckets = append(l.buckets, new([eventsBucketSize]Event)) + } + ptr := &l.buckets[a][b] + l.n++ + return ptr +} + +// append appends v to the slice and returns a pointer to the new element. +func (l *Events) append(v Event) *Event { + ptr := l.grow() + *ptr = v + return ptr +} + +func (l *Events) Ptr(i int) *Event { + a, b := l.index(i + l.off) + return &l.buckets[a][b] +} + +func (l *Events) index(i int) (int, int) { + // Doing the division on uint instead of int compiles this function to a + // shift and an AND (for power of 2 bucket sizes), versus a whole bunch of + // instructions for int. 
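+	//
+	// For example, with eventsBucketSize = 524288, i = 524293 maps to
+	// bucket 1, offset 5: one full bucket of 524288 events, then five more.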
+	return int(uint(i) / eventsBucketSize), int(uint(i) % eventsBucketSize)
+}
+
+func (l *Events) Len() int {
+	return l.n - l.off
+}
+
+func (l *Events) Less(i, j int) bool {
+	return l.Ptr(i).Ts < l.Ptr(j).Ts
+}
+
+func (l *Events) Swap(i, j int) {
+	*l.Ptr(i), *l.Ptr(j) = *l.Ptr(j), *l.Ptr(i)
+}
+
+func (l *Events) Pop() (*Event, bool) {
+	if l.off == l.n {
+		return nil, false
+	}
+	a, b := l.index(l.off)
+	ptr := &l.buckets[a][b]
+	l.off++
+	if b == eventsBucketSize-1 || l.off == l.n {
+		// We've consumed the last event from the bucket, so drop the bucket and
+		// allow GC to collect it.
+		l.buckets[a] = nil
+	}
+	return ptr, true
+}
+
+func (l *Events) All() func(yield func(ev *Event) bool) {
+	return func(yield func(ev *Event) bool) {
+		for i := 0; i < l.Len(); i++ {
+			a, b := l.index(i + l.off)
+			ptr := &l.buckets[a][b]
+			if !yield(ptr) {
+				return
+			}
+		}
+	}
+}
+
+// parseEventBatches reads per-P event batches and merges them into a single,
+// consistent stream. The high-level idea is as follows. Events within an
+// individual batch are in the correct order, because they are emitted by a
+// single P. So we need to produce a correct interleaving of the batches. To do
+// this we take the first unmerged event from each batch (the frontier). Then
+// we choose the subset that is "ready" to be merged, that is, events for which
+// all dependencies have already been merged. Then we choose the event with the
+// lowest timestamp from the subset, merge it, and repeat. This approach
+// ensures that we form a consistent stream even if timestamps are incorrect (a
+// condition observed on some machines).
+func (p *parser) parseEventBatches() (Events, error) {
+	// The ordering of CPU profile sample events in the data stream is based on
+	// when each run of the signal handler was able to acquire the spinlock,
+	// with original timestamps corresponding to when ReadTrace pulled the data
+	// off of the profBuf queue. Re-sort them by the timestamp we captured
+	// inside the signal handler.
+	slices.SortFunc(p.cpuSamples, func(a, b Event) int {
+		return cmp.Compare(a.Ts, b.Ts)
+	})
+
+	allProcs := make([]proc, 0, len(p.batchOffsets))
+	for pid := range p.batchOffsets {
+		allProcs = append(allProcs, proc{pid: pid})
+	}
+	allProcs = append(allProcs, proc{pid: ProfileP, events: p.cpuSamples})
+
+	events := Events{}
+
+	// Merge events as long as at least one P has more events
+	gs := make(map[uint64]gState)
+	// Note: technically we don't need a priority queue here. We're only ever
+	// interested in the earliest eligible event, which means we just have to
+	// track the smallest element. However, in practice, the priority queue
+	// performs better, because for each event we only have to compute its state
+	// transition once, not on each iteration. If it was eligible before, it'll
+	// already be in the queue. Furthermore, on average, we only have one P to
+	// look at in each iteration, because all other Ps are already in the queue.
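+	//
+	// As a rough sketch, the merge below proceeds as follows:
+	//
+	//	for {
+	//		pop each available P's next event whose state transition
+	//		is ready and push it onto the frontier heap
+	//		if the frontier is empty: all Ps are done, or no
+	//		consistent ordering exists
+	//		pop the earliest frontier event, apply its transition,
+	//		and append it to the output
+	//	}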
+ var frontier orderEventList + + availableProcs := make([]*proc, len(allProcs)) + for i := range allProcs { + availableProcs[i] = &allProcs[i] + } + for { + pidLoop: + for i := 0; i < len(availableProcs); i++ { + proc := availableProcs[i] + + for len(proc.events) == 0 { + // Call loadBatch in a loop because sometimes batches are empty + evs, err := p.loadBatch(proc.pid, proc.buf[:0]) + proc.buf = evs[:0] + if err == io.EOF { + // This P has no more events + proc.done = true + availableProcs[i], availableProcs[len(availableProcs)-1] = availableProcs[len(availableProcs)-1], availableProcs[i] + availableProcs = availableProcs[:len(availableProcs)-1] + // We swapped the element at i with another proc, so look at + // the index again + i-- + continue pidLoop + } else if err != nil { + return Events{}, err + } else { + proc.events = evs + } + } + + ev := &proc.events[0] + g, init, _ := stateTransition(ev) + + // TODO(dh): This implementation matches the behavior of the + // upstream 'go tool trace', and works in practice, but has run into + // the following inconsistency during fuzzing: what happens if + // multiple Ps have events for the same G? While building the + // frontier we will check all of the events against the current + // state of the G. However, when we process the frontier, the state + // of the G changes, and a transition that was valid while building + // the frontier may no longer be valid when processing the frontier. + // Is this something that can happen for real, valid traces, or is + // this only possible with corrupt data? + if !transitionReady(g, gs[g], init) { + continue + } + proc.events = proc.events[1:] + availableProcs[i], availableProcs[len(availableProcs)-1] = availableProcs[len(availableProcs)-1], availableProcs[i] + availableProcs = availableProcs[:len(availableProcs)-1] + frontier.Push(orderEvent{*ev, proc}) + + // We swapped the element at i with another proc, so look at the + // index again + i-- + } + + if len(frontier) == 0 { + for i := range allProcs { + if !allProcs[i].done { + return Events{}, fmt.Errorf("no consistent ordering of events possible") + } + } + break + } + f := frontier.Pop() + + // We're computing the state transition twice, once when computing the + // frontier, and now to apply the transition. This is fine because + // stateTransition is a pure function. Computing it again is cheaper + // than storing large items in the frontier. + g, init, next := stateTransition(&f.ev) + + // Get rid of "Local" events, they are intended merely for ordering. + switch f.ev.Type { + case EvGoStartLocal: + f.ev.Type = EvGoStart + case EvGoUnblockLocal: + f.ev.Type = EvGoUnblock + case EvGoSysExitLocal: + f.ev.Type = EvGoSysExit + } + events.append(f.ev) + + if err := transition(gs, g, init, next); err != nil { + return Events{}, err + } + availableProcs = append(availableProcs, f.proc) + } + + // At this point we have a consistent stream of events. Make sure time + // stamps respect the ordering. The tests will skip (not fail) the test case + // if they see this error. + if !sort.IsSorted(&events) { + return Events{}, ErrTimeOrder + } + + // The last part is giving correct timestamps to EvGoSysExit events. The + // problem with EvGoSysExit is that actual syscall exit timestamp + // (ev.Args[2]) is potentially acquired long before event emission. So far + // we've used timestamp of event emission (ev.Ts). We could not set ev.Ts = + // ev.Args[2] earlier, because it would produce seemingly broken timestamps + // (misplaced event). 
We also can't simply update the timestamp and resort + // events, because if timestamps are broken we will misplace the event and + // later report logically broken trace (instead of reporting broken + // timestamps). + lastSysBlock := make(map[uint64]Timestamp) + for i := 0; i < events.Len(); i++ { + ev := events.Ptr(i) + switch ev.Type { + case EvGoSysBlock, EvGoInSyscall: + lastSysBlock[ev.G] = ev.Ts + case EvGoSysExit: + ts := Timestamp(ev.Args[2]) + if ts == 0 { + continue + } + block := lastSysBlock[ev.G] + if block == 0 { + return Events{}, fmt.Errorf("stray syscall exit") + } + if ts < block { + return Events{}, ErrTimeOrder + } + ev.Ts = ts + } + } + sort.Stable(&events) + + return events, nil +} + +// collectBatchesAndCPUSamples records the offsets of batches and parses CPU samples. +func (p *parser) collectBatchesAndCPUSamples() error { + // Read events. + var raw rawEvent + var curP int32 + for n := uint64(0); ; n++ { + err := p.readRawEvent(skipArgs|skipStrings, &raw) + if err == io.EOF { + break + } + if err != nil { + return err + } + if raw.typ == EvNone { + continue + } + + if raw.typ == EvBatch { + bo := batchOffset{offset: raw.batchOffset} + p.batchOffsets[raw.batchPid] = append(p.batchOffsets[raw.batchPid], bo) + curP = raw.batchPid + } + + batches := p.batchOffsets[curP] + if len(batches) == 0 { + return fmt.Errorf("read event %d with current P of %d, but P has no batches yet", + raw.typ, curP) + } + batches[len(batches)-1].numEvents++ + + if raw.typ == EvCPUSample { + e := Event{Type: raw.typ} + + argOffset := 1 + narg := raw.argNum() + if len(raw.args) != narg { + return fmt.Errorf("CPU sample has wrong number of arguments: want %d, got %d", narg, len(raw.args)) + } + for i := argOffset; i < narg; i++ { + if i == narg-1 { + e.StkID = uint32(raw.args[i]) + } else { + e.Args[i-argOffset] = raw.args[i] + } + } + + e.Ts = Timestamp(e.Args[0]) + e.P = int32(e.Args[1]) + e.G = e.Args[2] + e.Args[0] = 0 + + // Most events are written out by the active P at the exact moment + // they describe. CPU profile samples are different because they're + // written to the tracing log after some delay, by a separate worker + // goroutine, into a separate buffer. + // + // We keep these in their own batch until all of the batches are + // merged in timestamp order. We also (right before the merge) + // re-sort these events by the timestamp captured in the profiling + // signal handler. + // + // Note that we're not concerned about the memory usage of storing + // all CPU samples during the indexing phase. There are orders of + // magnitude fewer CPU samples than runtime events. + p.cpuSamples = append(p.cpuSamples, e) + } + } + + return nil +} + +const ( + skipArgs = 1 << iota + skipStrings +) + +func (p *parser) readByte() (byte, bool) { + if p.off < len(p.data) && p.off >= 0 { + b := p.data[p.off] + p.off++ + return b, true + } else { + return 0, false + } +} + +func (p *parser) readFull(n int) ([]byte, error) { + if p.off >= len(p.data) || p.off < 0 || p.off+n > len(p.data) { + // p.off < 0 is impossible but makes BCE happy. + // + // We do fail outright if there's not enough data, we don't care about + // partial results. + return nil, io.ErrUnexpectedEOF + } + buf := p.data[p.off : p.off+n] + p.off += n + return buf, nil +} + +// readRawEvent reads a raw event into ev. The slices in ev are only valid until +// the next call to readRawEvent, even when storing to a different location. 
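+//
+// The first byte of an event packs the event type into its low six bits and
+// the inline argument count into its high two bits. An illustrative decoding:
+//
+//	b := byte(0x06)    // 0b00_000110
+//	typ := b << 2 >> 2 // 6, i.e. EvProcStop
+//	narg := b>>6 + 1   // 0 + 1 = 1 (just the timestamp)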
+func (p *parser) readRawEvent(flags uint, ev *rawEvent) error {
+	// The number of arguments is encoded using two bits and can thus only
+	// represent the values 0–3. The value 3 (on the wire) indicates that
+	// arguments are prefixed by their byte length, to encode >=3 arguments.
+	const inlineArgs = 3
+
+	// Read event type and number of arguments (1 byte).
+	b, ok := p.readByte()
+	if !ok {
+		return io.EOF
+	}
+	typ := event.Type(b << 2 >> 2)
+	// Most events have a timestamp before the actual arguments, so we add 1 and
+	// parse it like it's the first argument. EvString has a special format and
+	// the number of arguments doesn't matter. EvBatch writes '1' as the number
+	// of arguments, but actually has two: a pid and a timestamp, but here the
+	// timestamp is the second argument, not the first; adding 1 happens to come
+	// up with the correct number, but it doesn't matter, because EvBatch has
+	// custom logic for parsing.
+	//
+	// Note that because we're adding 1, inlineArgs == 3 describes the largest
+	// number of logical arguments that isn't length-prefixed, even though the
+	// value 3 on the wire indicates length-prefixing. For us, that becomes narg
+	// == 4.
+	narg := b>>6 + 1
+	if typ == EvNone || typ >= EvCount || EventDescriptions[typ].minVersion > p.ver {
+		return fmt.Errorf("unknown event type %d", typ)
+	}
+
+	switch typ {
+	case EvString:
+		if flags&skipStrings != 0 {
+			// String dictionary entry [ID, length, string].
+			if _, err := p.readVal(); err != nil {
+				return errMalformedVarint
+			}
+			ln, err := p.readVal()
+			if err != nil {
+				return err
+			}
+			if !p.discard(ln) {
+				return fmt.Errorf("failed to read trace: %w", io.EOF)
+			}
+		} else {
+			// String dictionary entry [ID, length, string].
+			id, err := p.readVal()
+			if err != nil {
+				return err
+			}
+			if id == 0 {
+				return errors.New("string has invalid id 0")
+			}
+			if p.strings[id] != "" {
+				return fmt.Errorf("string has duplicate id %d", id)
+			}
+			var ln uint64
+			ln, err = p.readVal()
+			if err != nil {
+				return err
+			}
+			if ln == 0 {
+				return errors.New("string has invalid length 0")
+			}
+			if ln > 1e6 {
+				return fmt.Errorf("string has too large length %d", ln)
+			}
+			buf, err := p.readFull(int(ln))
+			if err != nil {
+				return fmt.Errorf("failed to read trace: %w", err)
+			}
+			p.strings[id] = string(buf)
+		}
+
+		ev.typ = EvNone
+		return nil
+	case EvBatch:
+		if want := byte(2); narg != want {
+			return fmt.Errorf("EvBatch has wrong number of arguments: got %d, want %d", narg, want)
+		}
+
+		// -1 because we've already read the first byte of the batch
+		off := p.off - 1
+
+		pid, err := p.readVal()
+		if err != nil {
+			return err
+		}
+		if pid != math.MaxUint64 && pid > math.MaxInt32 {
+			return fmt.Errorf("processor ID %d is larger than maximum of %d", pid, uint64(math.MaxInt32))
+		}
+
+		var pid32 int32
+		if pid == math.MaxUint64 {
+			pid32 = -1
+		} else {
+			pid32 = int32(pid)
+		}
+
+		v, err := p.readVal()
+		if err != nil {
+			return err
+		}
+
+		*ev = rawEvent{
+			typ:         EvBatch,
+			args:        p.args[:0],
+			batchPid:    pid32,
+			batchOffset: off,
+		}
+		ev.args = append(ev.args, pid, v)
+		return nil
+	default:
+		*ev = rawEvent{typ: typ, args: p.args[:0]}
+		if narg <= inlineArgs {
+			if flags&skipArgs == 0 {
+				for i := 0; i < int(narg); i++ {
+					v, err := p.readVal()
+					if err != nil {
+						return fmt.Errorf("failed to read event %d argument: %w", typ, err)
+					}
+					ev.args = append(ev.args, v)
+				}
+			} else {
+				for i := 0; i < int(narg); i++ {
+					if _, err := p.readVal(); err != nil {
+						return fmt.Errorf("failed to read event %d argument: %w", typ, errMalformedVarint)
+					}
+				}
+			}
+		} else {
+			// More than inlineArgs args, the first value is length of the event
+			// in bytes.
+			v, err := p.readVal()
+			if err != nil {
+				return fmt.Errorf("failed to read event %d argument: %w", typ, err)
+			}
+
+			if limit := uint64(2048); v > limit {
+				// At the time of Go 1.19, v seems to be at most 128. Set 2048
+				// as a generous upper limit and guard against malformed traces.
+				return fmt.Errorf("failed to read event %d argument: length-prefixed argument too big: %d bytes, limit is %d", typ, v, limit)
+			}
+
+			if flags&skipArgs == 0 || typ == EvCPUSample {
+				buf, err := p.readFull(int(v))
+				if err != nil {
+					return fmt.Errorf("failed to read trace: %w", err)
+				}
+				for len(buf) > 0 {
+					var v uint64
+					v, buf, err = readValFrom(buf)
+					if err != nil {
+						return err
+					}
+					ev.args = append(ev.args, v)
+				}
+			} else {
+				// Skip over arguments
+				if !p.discard(v) {
+					return fmt.Errorf("failed to read trace: %w", io.EOF)
+				}
+			}
+			if typ == EvUserLog {
+				// EvUserLog records are followed by a value string
+				if flags&skipArgs == 0 {
+					// Read string
+					s, err := p.readStr()
+					if err != nil {
+						return err
+					}
+					ev.sargs = append(ev.sargs, s)
+				} else {
+					// Skip string
+					v, err := p.readVal()
+					if err != nil {
+						return err
+					}
+					if !p.discard(v) {
+						return io.EOF
+					}
+				}
+			}
+		}
+
+		p.args = ev.args[:0]
+		return nil
+	}
+}
+
+// loadBatch loads the next batch for pid and appends its contents to events.
+func (p *parser) loadBatch(pid int32, events []Event) ([]Event, error) {
+	offsets := p.batchOffsets[pid]
+	if len(offsets) == 0 {
+		return nil, io.EOF
+	}
+	n := offsets[0].numEvents
+	offset := offsets[0].offset
+	offsets = offsets[1:]
+	p.batchOffsets[pid] = offsets
+
+	p.off = offset
+
+	if cap(events) < n {
+		events = make([]Event, 0, n)
+	}
+
+	gotHeader := false
+	var raw rawEvent
+	var ev Event
+	for {
+		err := p.readRawEvent(0, &raw)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if raw.typ == EvNone || raw.typ == EvCPUSample {
+			continue
+		}
+		if raw.typ == EvBatch {
+			if gotHeader {
+				break
+			} else {
+				gotHeader = true
+			}
+		}
+
+		err = p.parseEvent(&raw, &ev)
+		if err != nil {
+			return nil, err
+		}
+		if ev.Type != EvNone {
+			events = append(events, ev)
+		}
+	}
+
+	return events, nil
+}
+
+func (p *parser) readStr() (s string, err error) {
+	sz, err := p.readVal()
+	if err != nil {
+		return "", err
+	}
+	if sz == 0 {
+		return "", nil
+	}
+	if sz > 1e6 {
+		return "", fmt.Errorf("string is too large (len=%d)", sz)
+	}
+	buf, err := p.readFull(int(sz))
+	if err != nil {
+		return "", fmt.Errorf("failed to read trace: %w", err)
+	}
+	return string(buf), nil
+}
+
+// parseEvent transforms raw events into events.
+// It analyzes and verifies per-event-type arguments.
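+//
+// Note that timestamps are delta-encoded: for most events, raw.args[0] holds
+// the tick delta to the previous event on the same P, and the absolute time
+// is reconstructed below as ev.Ts = p.lastTs + raw.args[0].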
+func (p *parser) parseEvent(raw *rawEvent, ev *Event) error { + desc := &EventDescriptions[raw.typ] + if desc.Name == "" { + return fmt.Errorf("missing description for event type %d", raw.typ) + } + narg := raw.argNum() + if len(raw.args) != narg { + return fmt.Errorf("%s has wrong number of arguments: want %d, got %d", desc.Name, narg, len(raw.args)) + } + switch raw.typ { + case EvBatch: + p.lastGs[p.lastP] = p.lastG + if raw.args[0] != math.MaxUint64 && raw.args[0] > math.MaxInt32 { + return fmt.Errorf("processor ID %d is larger than maximum of %d", raw.args[0], uint64(math.MaxInt32)) + } + if raw.args[0] == math.MaxUint64 { + p.lastP = -1 + } else { + p.lastP = int32(raw.args[0]) + } + p.lastG = p.lastGs[p.lastP] + p.lastTs = Timestamp(raw.args[1]) + case EvFrequency: + p.ticksPerSec = int64(raw.args[0]) + if p.ticksPerSec <= 0 { + // The most likely cause for this is tick skew on different CPUs. + // For example, solaris/amd64 seems to have wildly different + // ticks on different CPUs. + return ErrTimeOrder + } + case EvTimerGoroutine: + p.timerGoids[raw.args[0]] = true + case EvStack: + if len(raw.args) < 2 { + return fmt.Errorf("EvStack has wrong number of arguments: want at least 2, got %d", len(raw.args)) + } + size := raw.args[1] + if size > 1000 { + return fmt.Errorf("EvStack has bad number of frames: %d", size) + } + want := 2 + 4*size + if uint64(len(raw.args)) != want { + return fmt.Errorf("EvStack has wrong number of arguments: want %d, got %d", want, len(raw.args)) + } + id := uint32(raw.args[0]) + if id != 0 && size > 0 { + stk := p.allocateStack(size) + for i := 0; i < int(size); i++ { + pc := raw.args[2+i*4+0] + fn := raw.args[2+i*4+1] + file := raw.args[2+i*4+2] + line := raw.args[2+i*4+3] + stk[i] = pc + + if _, ok := p.pcs[pc]; !ok { + p.pcs[pc] = Frame{PC: pc, Fn: fn, File: file, Line: int(line)} + } + } + p.stacks[id] = stk + } + case EvCPUSample: + // These events get parsed during the indexing step and don't strictly + // belong to the batch. + default: + *ev = Event{Type: raw.typ, P: p.lastP, G: p.lastG} + var argOffset int + ev.Ts = p.lastTs + Timestamp(raw.args[0]) + argOffset = 1 + p.lastTs = ev.Ts + for i := argOffset; i < narg; i++ { + if i == narg-1 && desc.Stack { + ev.StkID = uint32(raw.args[i]) + } else { + ev.Args[i-argOffset] = raw.args[i] + } + } + switch raw.typ { + case EvGoStart, EvGoStartLocal, EvGoStartLabel: + p.lastG = ev.Args[0] + ev.G = p.lastG + case EvGoEnd, EvGoStop, EvGoSched, EvGoPreempt, + EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv, + EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, + EvGoSysBlock, EvGoBlockGC: + p.lastG = 0 + case EvGoSysExit, EvGoWaiting, EvGoInSyscall: + ev.G = ev.Args[0] + case EvUserTaskCreate: + // e.Args 0: taskID, 1:parentID, 2:nameID + case EvUserRegion: + // e.Args 0: taskID, 1: mode, 2:nameID + case EvUserLog: + // e.Args 0: taskID, 1:keyID, 2: stackID, 3: messageID + // raw.sargs 0: message + + if id, ok := p.inlineStringsMapping[raw.sargs[0]]; ok { + ev.Args[3] = uint64(id) + } else { + id := len(p.inlineStrings) + p.inlineStringsMapping[raw.sargs[0]] = id + p.inlineStrings = append(p.inlineStrings, raw.sargs[0]) + ev.Args[3] = uint64(id) + } + } + + return nil + } + + ev.Type = EvNone + return nil +} + +// ErrTimeOrder is returned by Parse when the trace contains +// time stamps that do not respect actual event ordering. +var ErrTimeOrder = errors.New("time stamps out of order") + +// postProcessTrace does inter-event verification and information restoration. 
+// The resulting trace is guaranteed to be consistent +// (for example, a P does not run two Gs at the same time, or a G is indeed +// blocked before an unblock event). +func (p *parser) postProcessTrace(events Events) error { + const ( + gDead = iota + gRunnable + gRunning + gWaiting + ) + type gdesc struct { + state int + ev *Event + evStart *Event + evCreate *Event + evMarkAssist *Event + } + type pdesc struct { + running bool + g uint64 + evSweep *Event + } + + gs := make(map[uint64]gdesc) + ps := make(map[int32]pdesc) + tasks := make(map[uint64]*Event) // task id to task creation events + activeRegions := make(map[uint64][]*Event) // goroutine id to stack of regions + gs[0] = gdesc{state: gRunning} + var evGC, evSTW *Event + + checkRunning := func(p pdesc, g gdesc, ev *Event, allowG0 bool) error { + name := EventDescriptions[ev.Type].Name + if g.state != gRunning { + return fmt.Errorf("g %d is not running while %s (time %d)", ev.G, name, ev.Ts) + } + if p.g != ev.G { + return fmt.Errorf("p %d is not running g %d while %s (time %d)", ev.P, ev.G, name, ev.Ts) + } + if !allowG0 && ev.G == 0 { + return fmt.Errorf("g 0 did %s (time %d)", name, ev.Ts) + } + return nil + } + + for evIdx := 0; evIdx < events.Len(); evIdx++ { + ev := events.Ptr(evIdx) + + switch ev.Type { + case EvProcStart: + p := ps[ev.P] + if p.running { + return fmt.Errorf("p %d is running before start (time %d)", ev.P, ev.Ts) + } + p.running = true + + ps[ev.P] = p + case EvProcStop: + p := ps[ev.P] + if !p.running { + return fmt.Errorf("p %d is not running before stop (time %d)", ev.P, ev.Ts) + } + if p.g != 0 { + return fmt.Errorf("p %d is running a goroutine %d during stop (time %d)", ev.P, p.g, ev.Ts) + } + p.running = false + + ps[ev.P] = p + case EvGCStart: + if evGC != nil { + return fmt.Errorf("previous GC is not ended before a new one (time %d)", ev.Ts) + } + evGC = ev + // Attribute this to the global GC state. + ev.P = GCP + case EvGCDone: + if evGC == nil { + return fmt.Errorf("bogus GC end (time %d)", ev.Ts) + } + evGC = nil + case EvSTWStart: + evp := &evSTW + if *evp != nil { + return fmt.Errorf("previous STW is not ended before a new one (time %d)", ev.Ts) + } + *evp = ev + case EvSTWDone: + evp := &evSTW + if *evp == nil { + return fmt.Errorf("bogus STW end (time %d)", ev.Ts) + } + *evp = nil + case EvGCSweepStart: + p := ps[ev.P] + if p.evSweep != nil { + return fmt.Errorf("previous sweeping is not ended before a new one (time %d)", ev.Ts) + } + p.evSweep = ev + + ps[ev.P] = p + case EvGCMarkAssistStart: + g := gs[ev.G] + if g.evMarkAssist != nil { + return fmt.Errorf("previous mark assist is not ended before a new one (time %d)", ev.Ts) + } + g.evMarkAssist = ev + + gs[ev.G] = g + case EvGCMarkAssistDone: + // Unlike most events, mark assists can be in progress when a + // goroutine starts tracing, so we can't report an error here. 
+ g := gs[ev.G] + if g.evMarkAssist != nil { + g.evMarkAssist = nil + } + + gs[ev.G] = g + case EvGCSweepDone: + p := ps[ev.P] + if p.evSweep == nil { + return fmt.Errorf("bogus sweeping end (time %d)", ev.Ts) + } + p.evSweep = nil + + ps[ev.P] = p + case EvGoWaiting: + g := gs[ev.G] + if g.state != gRunnable { + return fmt.Errorf("g %d is not runnable before EvGoWaiting (time %d)", ev.G, ev.Ts) + } + g.state = gWaiting + g.ev = ev + + gs[ev.G] = g + case EvGoInSyscall: + g := gs[ev.G] + if g.state != gRunnable { + return fmt.Errorf("g %d is not runnable before EvGoInSyscall (time %d)", ev.G, ev.Ts) + } + g.state = gWaiting + g.ev = ev + + gs[ev.G] = g + case EvGoCreate: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, true); err != nil { + return err + } + if _, ok := gs[ev.Args[0]]; ok { + return fmt.Errorf("g %d already exists (time %d)", ev.Args[0], ev.Ts) + } + gs[ev.Args[0]] = gdesc{state: gRunnable, ev: ev, evCreate: ev} + + case EvGoStart, EvGoStartLabel: + g := gs[ev.G] + p := ps[ev.P] + if g.state != gRunnable { + return fmt.Errorf("g %d is not runnable before start (time %d)", ev.G, ev.Ts) + } + if p.g != 0 { + return fmt.Errorf("p %d is already running g %d while start g %d (time %d)", ev.P, p.g, ev.G, ev.Ts) + } + g.state = gRunning + g.evStart = ev + p.g = ev.G + if g.evCreate != nil { + ev.StkID = uint32(g.evCreate.Args[1]) + g.evCreate = nil + } + + if g.ev != nil { + g.ev = nil + } + + gs[ev.G] = g + ps[ev.P] = p + case EvGoEnd, EvGoStop: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, false); err != nil { + return err + } + g.evStart = nil + g.state = gDead + p.g = 0 + + if ev.Type == EvGoEnd { // flush all active regions + delete(activeRegions, ev.G) + } + + gs[ev.G] = g + ps[ev.P] = p + case EvGoSched, EvGoPreempt: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, false); err != nil { + return err + } + g.state = gRunnable + g.evStart = nil + p.g = 0 + g.ev = ev + + gs[ev.G] = g + ps[ev.P] = p + case EvGoUnblock: + g := gs[ev.G] + p := ps[ev.P] + if g.state != gRunning { + return fmt.Errorf("g %d is not running while unpark (time %d)", ev.G, ev.Ts) + } + if ev.P != TimerP && p.g != ev.G { + return fmt.Errorf("p %d is not running g %d while unpark (time %d)", ev.P, ev.G, ev.Ts) + } + g1 := gs[ev.Args[0]] + if g1.state != gWaiting { + return fmt.Errorf("g %d is not waiting before unpark (time %d)", ev.Args[0], ev.Ts) + } + if g1.ev != nil && g1.ev.Type == EvGoBlockNet { + ev.P = NetpollP + } + g1.state = gRunnable + g1.ev = ev + gs[ev.Args[0]] = g1 + + case EvGoSysCall: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, false); err != nil { + return err + } + g.ev = ev + + gs[ev.G] = g + case EvGoSysBlock: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, false); err != nil { + return err + } + g.state = gWaiting + g.evStart = nil + p.g = 0 + + gs[ev.G] = g + ps[ev.P] = p + case EvGoSysExit: + g := gs[ev.G] + if g.state != gWaiting { + return fmt.Errorf("g %d is not waiting during syscall exit (time %d)", ev.G, ev.Ts) + } + g.state = gRunnable + g.ev = ev + + gs[ev.G] = g + case EvGoSleep, EvGoBlock, EvGoBlockSend, EvGoBlockRecv, + EvGoBlockSelect, EvGoBlockSync, EvGoBlockCond, EvGoBlockNet, EvGoBlockGC: + g := gs[ev.G] + p := ps[ev.P] + if err := checkRunning(p, g, ev, false); err != nil { + return err + } + g.state = gWaiting + g.ev = ev + g.evStart = nil + p.g = 0 + + gs[ev.G] = g + ps[ev.P] = p + case EvUserTaskCreate: + taskid := ev.Args[0] + if prevEv, ok := tasks[taskid]; ok { + 
return fmt.Errorf("task id conflicts (id:%d), %q vs %q", taskid, ev, prevEv) + } + tasks[ev.Args[0]] = ev + + case EvUserTaskEnd: + taskid := ev.Args[0] + delete(tasks, taskid) + + case EvUserRegion: + mode := ev.Args[1] + regions := activeRegions[ev.G] + if mode == 0 { // region start + activeRegions[ev.G] = append(regions, ev) // push + } else if mode == 1 { // region end + n := len(regions) + if n > 0 { // matching region start event is in the trace. + s := regions[n-1] + if s.Args[0] != ev.Args[0] || s.Args[2] != ev.Args[2] { // task id, region name mismatch + return fmt.Errorf("misuse of region in goroutine %d: span end %q when the inner-most active span start event is %q", ev.G, ev, s) + } + + if n > 1 { + activeRegions[ev.G] = regions[:n-1] + } else { + delete(activeRegions, ev.G) + } + } + } else { + return fmt.Errorf("invalid user region mode: %q", ev) + } + } + + if ev.StkID != 0 && len(p.stacks[ev.StkID]) == 0 { + // Make sure events don't refer to stacks that don't exist or to + // stacks with zero frames. Neither of these should be possible, but + // better be safe than sorry. + + ev.StkID = 0 + } + + } + + // TODO(mknyszek): restore stacks for EvGoStart events. + return nil +} + +var errMalformedVarint = errors.New("malformatted base-128 varint") + +// readVal reads unsigned base-128 value from r. +func (p *parser) readVal() (uint64, error) { + v, n := binary.Uvarint(p.data[p.off:]) + if n <= 0 { + return 0, errMalformedVarint + } + p.off += n + return v, nil +} + +func readValFrom(buf []byte) (v uint64, rem []byte, err error) { + v, n := binary.Uvarint(buf) + if n <= 0 { + return 0, nil, errMalformedVarint + } + return v, buf[n:], nil +} + +func (ev *Event) String() string { + desc := &EventDescriptions[ev.Type] + w := new(bytes.Buffer) + fmt.Fprintf(w, "%d %s p=%d g=%d stk=%d", ev.Ts, desc.Name, ev.P, ev.G, ev.StkID) + for i, a := range desc.Args { + fmt.Fprintf(w, " %s=%d", a, ev.Args[i]) + } + return w.String() +} + +// argNum returns total number of args for the event accounting for timestamps, +// sequence numbers and differences between trace format versions. +func (raw *rawEvent) argNum() int { + desc := &EventDescriptions[raw.typ] + if raw.typ == EvStack { + return len(raw.args) + } + narg := len(desc.Args) + if desc.Stack { + narg++ + } + switch raw.typ { + case EvBatch, EvFrequency, EvTimerGoroutine: + return narg + } + narg++ // timestamp + return narg +} + +// Event types in the trace. +// Verbatim copy from src/runtime/trace.go with the "trace" prefix removed. 
+const ( + EvNone event.Type = 0 // unused + EvBatch event.Type = 1 // start of per-P batch of events [pid, timestamp] + EvFrequency event.Type = 2 // contains tracer timer frequency [frequency (ticks per second)] + EvStack event.Type = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}] + EvGomaxprocs event.Type = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id] + EvProcStart event.Type = 5 // start of P [timestamp, thread id] + EvProcStop event.Type = 6 // stop of P [timestamp] + EvGCStart event.Type = 7 // GC start [timestamp, seq, stack id] + EvGCDone event.Type = 8 // GC done [timestamp] + EvSTWStart event.Type = 9 // GC mark termination start [timestamp, kind] + EvSTWDone event.Type = 10 // GC mark termination done [timestamp] + EvGCSweepStart event.Type = 11 // GC sweep start [timestamp, stack id] + EvGCSweepDone event.Type = 12 // GC sweep done [timestamp, swept, reclaimed] + EvGoCreate event.Type = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id] + EvGoStart event.Type = 14 // goroutine starts running [timestamp, goroutine id, seq] + EvGoEnd event.Type = 15 // goroutine ends [timestamp] + EvGoStop event.Type = 16 // goroutine stops (like in select{}) [timestamp, stack] + EvGoSched event.Type = 17 // goroutine calls Gosched [timestamp, stack] + EvGoPreempt event.Type = 18 // goroutine is preempted [timestamp, stack] + EvGoSleep event.Type = 19 // goroutine calls Sleep [timestamp, stack] + EvGoBlock event.Type = 20 // goroutine blocks [timestamp, stack] + EvGoUnblock event.Type = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack] + EvGoBlockSend event.Type = 22 // goroutine blocks on chan send [timestamp, stack] + EvGoBlockRecv event.Type = 23 // goroutine blocks on chan recv [timestamp, stack] + EvGoBlockSelect event.Type = 24 // goroutine blocks on select [timestamp, stack] + EvGoBlockSync event.Type = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack] + EvGoBlockCond event.Type = 26 // goroutine blocks on Cond [timestamp, stack] + EvGoBlockNet event.Type = 27 // goroutine blocks on network [timestamp, stack] + EvGoSysCall event.Type = 28 // syscall enter [timestamp, stack] + EvGoSysExit event.Type = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp] + EvGoSysBlock event.Type = 30 // syscall blocks [timestamp] + EvGoWaiting event.Type = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id] + EvGoInSyscall event.Type = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id] + EvHeapAlloc event.Type = 33 // gcController.heapLive change [timestamp, heap live bytes] + EvHeapGoal event.Type = 34 // gcController.heapGoal change [timestamp, heap goal bytes] + EvTimerGoroutine event.Type = 35 // denotes timer goroutine [timer goroutine id] + EvFutileWakeup event.Type = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp] + EvString event.Type = 37 // string dictionary entry [ID, length, string] + EvGoStartLocal event.Type = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id] + EvGoUnblockLocal event.Type = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack] + EvGoSysExitLocal event.Type = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp] + EvGoStartLabel event.Type = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id] 
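+	// The *Local variants above (38-40) carry no sequence number; they are
+	// ordered implicitly by being emitted on the same P as the event they
+	// follow (see stateTransition in order.go).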
+ EvGoBlockGC event.Type = 42 // goroutine blocks on GC assist [timestamp, stack] + EvGCMarkAssistStart event.Type = 43 // GC mark assist start [timestamp, stack] + EvGCMarkAssistDone event.Type = 44 // GC mark assist done [timestamp] + EvUserTaskCreate event.Type = 45 // trace.NewTask [timestamp, internal task id, internal parent id, stack, name string] + EvUserTaskEnd event.Type = 46 // end of task [timestamp, internal task id, stack] + EvUserRegion event.Type = 47 // trace.WithRegion [timestamp, internal task id, mode(0:start, 1:end), name string] + EvUserLog event.Type = 48 // trace.Log [timestamp, internal id, key string id, stack, value string] + EvCPUSample event.Type = 49 // CPU profiling sample [timestamp, stack, real timestamp, real P id (-1 when absent), goroutine id] + EvCount event.Type = 50 +) + +var EventDescriptions = [256]struct { + Name string + minVersion version.Version + Stack bool + Args []string + SArgs []string // string arguments +}{ + EvNone: {"None", 5, false, []string{}, nil}, + EvBatch: {"Batch", 5, false, []string{"p", "ticks"}, nil}, // in 1.5 format it was {"p", "seq", "ticks"} + EvFrequency: {"Frequency", 5, false, []string{"freq"}, nil}, // in 1.5 format it was {"freq", "unused"} + EvStack: {"Stack", 5, false, []string{"id", "siz"}, nil}, + EvGomaxprocs: {"Gomaxprocs", 5, true, []string{"procs"}, nil}, + EvProcStart: {"ProcStart", 5, false, []string{"thread"}, nil}, + EvProcStop: {"ProcStop", 5, false, []string{}, nil}, + EvGCStart: {"GCStart", 5, true, []string{"seq"}, nil}, // in 1.5 format it was {} + EvGCDone: {"GCDone", 5, false, []string{}, nil}, + EvSTWStart: {"GCSTWStart", 5, false, []string{"kindid"}, []string{"kind"}}, // <= 1.9, args was {} (implicitly {0}) + EvSTWDone: {"GCSTWDone", 5, false, []string{}, nil}, + EvGCSweepStart: {"GCSweepStart", 5, true, []string{}, nil}, + EvGCSweepDone: {"GCSweepDone", 5, false, []string{"swept", "reclaimed"}, nil}, // before 1.9, format was {} + EvGoCreate: {"GoCreate", 5, true, []string{"g", "stack"}, nil}, + EvGoStart: {"GoStart", 5, false, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"} + EvGoEnd: {"GoEnd", 5, false, []string{}, nil}, + EvGoStop: {"GoStop", 5, true, []string{}, nil}, + EvGoSched: {"GoSched", 5, true, []string{}, nil}, + EvGoPreempt: {"GoPreempt", 5, true, []string{}, nil}, + EvGoSleep: {"GoSleep", 5, true, []string{}, nil}, + EvGoBlock: {"GoBlock", 5, true, []string{}, nil}, + EvGoUnblock: {"GoUnblock", 5, true, []string{"g", "seq"}, nil}, // in 1.5 format it was {"g"} + EvGoBlockSend: {"GoBlockSend", 5, true, []string{}, nil}, + EvGoBlockRecv: {"GoBlockRecv", 5, true, []string{}, nil}, + EvGoBlockSelect: {"GoBlockSelect", 5, true, []string{}, nil}, + EvGoBlockSync: {"GoBlockSync", 5, true, []string{}, nil}, + EvGoBlockCond: {"GoBlockCond", 5, true, []string{}, nil}, + EvGoBlockNet: {"GoBlockNet", 5, true, []string{}, nil}, + EvGoSysCall: {"GoSysCall", 5, true, []string{}, nil}, + EvGoSysExit: {"GoSysExit", 5, false, []string{"g", "seq", "ts"}, nil}, + EvGoSysBlock: {"GoSysBlock", 5, false, []string{}, nil}, + EvGoWaiting: {"GoWaiting", 5, false, []string{"g"}, nil}, + EvGoInSyscall: {"GoInSyscall", 5, false, []string{"g"}, nil}, + EvHeapAlloc: {"HeapAlloc", 5, false, []string{"mem"}, nil}, + EvHeapGoal: {"HeapGoal", 5, false, []string{"mem"}, nil}, + EvTimerGoroutine: {"TimerGoroutine", 5, false, []string{"g"}, nil}, // in 1.5 format it was {"g", "unused"} + EvFutileWakeup: {"FutileWakeup", 5, false, []string{}, nil}, + EvString: {"String", 7, false, []string{}, nil}, + 
EvGoStartLocal: {"GoStartLocal", 7, false, []string{"g"}, nil}, + EvGoUnblockLocal: {"GoUnblockLocal", 7, true, []string{"g"}, nil}, + EvGoSysExitLocal: {"GoSysExitLocal", 7, false, []string{"g", "ts"}, nil}, + EvGoStartLabel: {"GoStartLabel", 8, false, []string{"g", "seq", "labelid"}, []string{"label"}}, + EvGoBlockGC: {"GoBlockGC", 8, true, []string{}, nil}, + EvGCMarkAssistStart: {"GCMarkAssistStart", 9, true, []string{}, nil}, + EvGCMarkAssistDone: {"GCMarkAssistDone", 9, false, []string{}, nil}, + EvUserTaskCreate: {"UserTaskCreate", 11, true, []string{"taskid", "pid", "typeid"}, []string{"name"}}, + EvUserTaskEnd: {"UserTaskEnd", 11, true, []string{"taskid"}, nil}, + EvUserRegion: {"UserRegion", 11, true, []string{"taskid", "mode", "typeid"}, []string{"name"}}, + EvUserLog: {"UserLog", 11, true, []string{"id", "keyid"}, []string{"category", "message"}}, + EvCPUSample: {"CPUSample", 19, true, []string{"ts", "p", "g"}, nil}, +} + +//gcassert:inline +func (p *parser) allocateStack(size uint64) []uint64 { + if size == 0 { + return nil + } + + // Stacks are plentiful but small. For our "Staticcheck on std" trace with + // 11e6 events, we have roughly 500,000 stacks, using 200 MiB of memory. To + // avoid making 500,000 small allocations we allocate backing arrays 1 MiB + // at a time. + out := p.stacksData + if uint64(len(out)) < size { + out = make([]uint64, 1024*128) + } + p.stacksData = out[size:] + return out[:size:size] +} + +func (tr *Trace) STWReason(kindID uint64) STWReason { + if tr.Version < 21 { + if kindID == 0 || kindID == 1 { + return STWReason(kindID + 1) + } else { + return STWUnknown + } + } else if tr.Version == 21 { + if kindID < NumSTWReasons { + return STWReason(kindID) + } else { + return STWUnknown + } + } else { + return STWUnknown + } +} + +type STWReason int + +const ( + STWUnknown STWReason = 0 + STWGCMarkTermination STWReason = 1 + STWGCSweepTermination STWReason = 2 + STWWriteHeapDump STWReason = 3 + STWGoroutineProfile STWReason = 4 + STWGoroutineProfileCleanup STWReason = 5 + STWAllGoroutinesStackTrace STWReason = 6 + STWReadMemStats STWReason = 7 + STWAllThreadsSyscall STWReason = 8 + STWGOMAXPROCS STWReason = 9 + STWStartTrace STWReason = 10 + STWStopTrace STWReason = 11 + STWCountPagesInUse STWReason = 12 + STWReadMetricsSlow STWReason = 13 + STWReadMemStatsSlow STWReason = 14 + STWPageCachePagesLeaked STWReason = 15 + STWResetDebugLog STWReason = 16 + + NumSTWReasons = 17 +) diff --git a/vendor/golang.org/x/exp/trace/internal/version/version.go b/vendor/golang.org/x/exp/trace/internal/version/version.go new file mode 100644 index 000000000000..c5e638acb045 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/internal/version/version.go @@ -0,0 +1,75 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package version + +import ( + "fmt" + "io" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" +) + +// Version represents the version of a trace file. +type Version uint32 + +const ( + Go111 Version = 11 + Go119 Version = 19 + Go121 Version = 21 + Go122 Version = 22 + Go123 Version = 23 + Current = Go123 +) + +var versions = map[Version][]event.Spec{ + // Go 1.11–1.21 use a different parser and are only set here for the sake of + // Version.Valid. 
+	Go111: nil,
+	Go119: nil,
+	Go121: nil,
+
+	Go122: go122.Specs(),
+	// Go 1.23 adds backwards-incompatible events, but
+	// traces produced by Go 1.22 are also always valid
+	// Go 1.23 traces.
+	Go123: go122.Specs(),
+}
+
+// Specs returns the set of event.Specs for this version.
+func (v Version) Specs() []event.Spec {
+	return versions[v]
+}
+
+func (v Version) Valid() bool {
+	_, ok := versions[v]
+	return ok
+}
+
+// headerFmt is the format of the header of all Go execution traces.
+const headerFmt = "go 1.%d trace\x00\x00\x00"
+
+// ReadHeader reads the version of the trace out of the trace file's
+// header, which must be present at the start of r.
+func ReadHeader(r io.Reader) (Version, error) {
+	var v Version
+	_, err := fmt.Fscanf(r, headerFmt, &v)
+	if err != nil {
+		return v, fmt.Errorf("bad file format: not a Go execution trace?")
+	}
+	if !v.Valid() {
+		return v, fmt.Errorf("unknown or unsupported trace version go 1.%d", v)
+	}
+	return v, nil
+}
+
+// WriteHeader writes a header for a trace version v to w.
+func WriteHeader(w io.Writer, v Version) (int, error) {
+	return fmt.Fprintf(w, headerFmt, v)
+}
diff --git a/vendor/golang.org/x/exp/trace/oldtrace.go b/vendor/golang.org/x/exp/trace/oldtrace.go
new file mode 100644
index 000000000000..4777a686fbf3
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/oldtrace.go
@@ -0,0 +1,572 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+// This file implements conversion from old (Go 1.11–Go 1.21) traces to the Go
+// 1.22 format.
+//
+// Most events have direct equivalents in 1.22, at worst requiring arguments to
+// be reordered. Some events, such as GoWaiting, need to look ahead for
+// follow-up events to determine the correct translation. GoSyscall, which is
+// an instantaneous event, gets turned into a 1 ns long pair of
+// GoSyscallStart+GoSyscallEnd, unless we observe a GoSysBlock, in which case we
+// emit a GoSyscallStart+GoSyscallEndBlocked pair with the correct duration
+// (i.e. starting at the original GoSyscall).
+//
+// The resulting trace treats the old trace as a single, large generation,
+// sharing a single evTable for all events.
+//
+// We use a new (compared to what was used for 'go tool trace' in earlier
+// versions of Go) parser for old traces that is optimized for speed, low memory
+// usage, and minimal GC pressure. It allocates events in batches so that even
+// though we have to load the entire trace into memory, the conversion process
+// shouldn't result in a doubling of memory usage, even if all converted events
+// are kept alive, as we free batches once we're done with them.
+//
+// The conversion process is lossless.
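+//
+// A minimal sketch of how an old trace flows through the public API
+// (hypothetical usage; f is any io.Reader positioned at the start of a
+// Go 1.11-1.21 trace file):
+//
+//	r, err := trace.NewReader(f)
+//	if err != nil {
+//		// not a recognized trace
+//	}
+//	for {
+//		ev, err := r.ReadEvent()
+//		if err == io.EOF {
+//			break
+//		}
+//		// ev is a Go 1.22-style event, converted on the fly.
+//		_ = ev
+//	}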
+ +package trace + +import ( + "errors" + "fmt" + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" + "golang.org/x/exp/trace/internal/oldtrace" + "io" +) + +type oldTraceConverter struct { + trace oldtrace.Trace + evt *evTable + preInit bool + createdPreInit map[GoID]struct{} + events oldtrace.Events + extra []Event + extraArr [3]Event + tasks map[TaskID]taskState + seenProcs map[ProcID]struct{} + lastTs Time + procMs map[ProcID]ThreadID + lastStwReason uint64 + + inlineToStringID []uint64 + builtinToStringID []uint64 +} + +const ( + // Block reasons + sForever = iota + sPreempted + sGosched + sSleep + sChanSend + sChanRecv + sNetwork + sSync + sSyncCond + sSelect + sEmpty + sMarkAssistWait + + // STW kinds + sSTWUnknown + sSTWGCMarkTermination + sSTWGCSweepTermination + sSTWWriteHeapDump + sSTWGoroutineProfile + sSTWGoroutineProfileCleanup + sSTWAllGoroutinesStackTrace + sSTWReadMemStats + sSTWAllThreadsSyscall + sSTWGOMAXPROCS + sSTWStartTrace + sSTWStopTrace + sSTWCountPagesInUse + sSTWReadMetricsSlow + sSTWReadMemStatsSlow + sSTWPageCachePagesLeaked + sSTWResetDebugLog + + sLast +) + +func (it *oldTraceConverter) init(pr oldtrace.Trace) error { + it.trace = pr + it.preInit = true + it.createdPreInit = make(map[GoID]struct{}) + it.evt = &evTable{pcs: make(map[uint64]frame)} + it.events = pr.Events + it.extra = it.extraArr[:0] + it.tasks = make(map[TaskID]taskState) + it.seenProcs = make(map[ProcID]struct{}) + it.procMs = make(map[ProcID]ThreadID) + it.lastTs = -1 + + evt := it.evt + + // Convert from oldtracer's Strings map to our dataTable. + var max uint64 + for id, s := range pr.Strings { + evt.strings.insert(stringID(id), s) + if id > max { + max = id + } + } + pr.Strings = nil + + // Add all strings used for UserLog. In the old trace format, these were + // stored inline and didn't have IDs. We generate IDs for them. + if max+uint64(len(pr.InlineStrings)) < max { + return errors.New("trace contains too many strings") + } + var addErr error + add := func(id stringID, s string) { + if err := evt.strings.insert(id, s); err != nil && addErr == nil { + addErr = err + } + } + for id, s := range pr.InlineStrings { + nid := max + 1 + uint64(id) + it.inlineToStringID = append(it.inlineToStringID, nid) + add(stringID(nid), s) + } + max += uint64(len(pr.InlineStrings)) + pr.InlineStrings = nil + + // Add strings that the converter emits explicitly. 
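+	// As with the inline strings above, the addition below is an overflow
+	// check: if max+sLast wraps around, the string ID space is exhausted.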
+ if max+uint64(sLast) < max { + return errors.New("trace contains too many strings") + } + it.builtinToStringID = make([]uint64, sLast) + addBuiltin := func(c int, s string) { + nid := max + 1 + uint64(c) + it.builtinToStringID[c] = nid + add(stringID(nid), s) + } + addBuiltin(sForever, "forever") + addBuiltin(sPreempted, "preempted") + addBuiltin(sGosched, "runtime.Gosched") + addBuiltin(sSleep, "sleep") + addBuiltin(sChanSend, "chan send") + addBuiltin(sChanRecv, "chan receive") + addBuiltin(sNetwork, "network") + addBuiltin(sSync, "sync") + addBuiltin(sSyncCond, "sync.(*Cond).Wait") + addBuiltin(sSelect, "select") + addBuiltin(sEmpty, "") + addBuiltin(sMarkAssistWait, "GC mark assist wait for work") + addBuiltin(sSTWUnknown, "") + addBuiltin(sSTWGCMarkTermination, "GC mark termination") + addBuiltin(sSTWGCSweepTermination, "GC sweep termination") + addBuiltin(sSTWWriteHeapDump, "write heap dump") + addBuiltin(sSTWGoroutineProfile, "goroutine profile") + addBuiltin(sSTWGoroutineProfileCleanup, "goroutine profile cleanup") + addBuiltin(sSTWAllGoroutinesStackTrace, "all goroutine stack trace") + addBuiltin(sSTWReadMemStats, "read mem stats") + addBuiltin(sSTWAllThreadsSyscall, "AllThreadsSyscall") + addBuiltin(sSTWGOMAXPROCS, "GOMAXPROCS") + addBuiltin(sSTWStartTrace, "start trace") + addBuiltin(sSTWStopTrace, "stop trace") + addBuiltin(sSTWCountPagesInUse, "CountPagesInUse (test)") + addBuiltin(sSTWReadMetricsSlow, "ReadMetricsSlow (test)") + addBuiltin(sSTWReadMemStatsSlow, "ReadMemStatsSlow (test)") + addBuiltin(sSTWPageCachePagesLeaked, "PageCachePagesLeaked (test)") + addBuiltin(sSTWResetDebugLog, "ResetDebugLog (test)") + + if addErr != nil { + // This should be impossible but let's be safe. + return fmt.Errorf("couldn't add strings: %w", addErr) + } + + it.evt.strings.compactify() + + // Convert stacks. + for id, stk := range pr.Stacks { + evt.stacks.insert(stackID(id), stack{pcs: stk}) + } + + // OPT(dh): if we could share the frame type between this package and + // oldtrace we wouldn't have to copy the map. + for pc, f := range pr.PCs { + evt.pcs[pc] = frame{ + pc: pc, + funcID: stringID(f.Fn), + fileID: stringID(f.File), + line: uint64(f.Line), + } + } + pr.Stacks = nil + pr.PCs = nil + evt.stacks.compactify() + return nil +} + +// next returns the next event, io.EOF if there are no more events, or a +// descriptive error for invalid events. +func (it *oldTraceConverter) next() (Event, error) { + if len(it.extra) > 0 { + ev := it.extra[0] + it.extra = it.extra[1:] + + if len(it.extra) == 0 { + it.extra = it.extraArr[:0] + } + // Two events aren't allowed to fall on the same timestamp in the new API, + // but this may happen when we produce EvGoStatus events + if ev.base.time <= it.lastTs { + ev.base.time = it.lastTs + 1 + } + it.lastTs = ev.base.time + return ev, nil + } + + oev, ok := it.events.Pop() + if !ok { + return Event{}, io.EOF + } + + ev, err := it.convertEvent(oev) + + if err == errSkip { + return it.next() + } else if err != nil { + return Event{}, err + } + + // Two events aren't allowed to fall on the same timestamp in the new API, + // but this may happen when we produce EvGoStatus events + if ev.base.time <= it.lastTs { + ev.base.time = it.lastTs + 1 + } + it.lastTs = ev.base.time + return ev, nil +} + +var errSkip = errors.New("skip event") + +// convertEvent converts an event from the old trace format to zero or more +// events in the new format. Most events translate 1 to 1. 
Some events don't
+// result in an event right away, in which case convertEvent returns errSkip.
+// Some events result in more than one new event; in this case, convertEvent
+// returns the first event and stores additional events in it.extra. When
+// encountering events that oldtrace shouldn't be able to emit, convertEvent
+// returns a descriptive error.
+func (it *oldTraceConverter) convertEvent(ev *oldtrace.Event) (OUT Event, ERR error) {
+	var mappedType event.Type
+	var mappedArgs timedEventArgs
+	copy(mappedArgs[:], ev.Args[:])
+
+	switch ev.Type {
+	case oldtrace.EvGomaxprocs:
+		mappedType = go122.EvProcsChange
+		if it.preInit {
+			// The first EvGomaxprocs signals the end of trace initialization. At this point we've seen
+			// all goroutines that already existed at trace begin.
+			it.preInit = false
+			for gid := range it.createdPreInit {
+				// These are goroutines that already existed when tracing started but for which we
+				// received none of GoWaiting, GoInSyscall, or GoStart. These are goroutines that are in
+				// the states _Gidle or _Grunnable.
+				it.extra = append(it.extra, Event{
+					ctx: schedCtx{
+						// G: GoID(gid),
+						G: NoGoroutine,
+						P: NoProc,
+						M: NoThread,
+					},
+					table: it.evt,
+					base: baseEvent{
+						typ:  go122.EvGoStatus,
+						time: Time(ev.Ts),
+						args: timedEventArgs{uint64(gid), ^uint64(0), uint64(go122.GoRunnable)},
+					},
+				})
+			}
+			it.createdPreInit = nil
+			return Event{}, errSkip
+		}
+	case oldtrace.EvProcStart:
+		it.procMs[ProcID(ev.P)] = ThreadID(ev.Args[0])
+		if _, ok := it.seenProcs[ProcID(ev.P)]; ok {
+			mappedType = go122.EvProcStart
+			mappedArgs = timedEventArgs{uint64(ev.P)}
+		} else {
+			it.seenProcs[ProcID(ev.P)] = struct{}{}
+			mappedType = go122.EvProcStatus
+			mappedArgs = timedEventArgs{uint64(ev.P), uint64(go122.ProcRunning)}
+		}
+	case oldtrace.EvProcStop:
+		if _, ok := it.seenProcs[ProcID(ev.P)]; ok {
+			mappedType = go122.EvProcStop
+			mappedArgs = timedEventArgs{uint64(ev.P)}
+		} else {
+			it.seenProcs[ProcID(ev.P)] = struct{}{}
+			mappedType = go122.EvProcStatus
+			mappedArgs = timedEventArgs{uint64(ev.P), uint64(go122.ProcIdle)}
+		}
+	case oldtrace.EvGCStart:
+		mappedType = go122.EvGCBegin
+	case oldtrace.EvGCDone:
+		mappedType = go122.EvGCEnd
+	case oldtrace.EvSTWStart:
+		sid := it.builtinToStringID[sSTWUnknown+it.trace.STWReason(ev.Args[0])]
+		it.lastStwReason = sid
+		mappedType = go122.EvSTWBegin
+		mappedArgs = timedEventArgs{uint64(sid)}
+	case oldtrace.EvSTWDone:
+		mappedType = go122.EvSTWEnd
+		mappedArgs = timedEventArgs{it.lastStwReason}
+	case oldtrace.EvGCSweepStart:
+		mappedType = go122.EvGCSweepBegin
+	case oldtrace.EvGCSweepDone:
+		mappedType = go122.EvGCSweepEnd
+	case oldtrace.EvGoCreate:
+		if it.preInit {
+			it.createdPreInit[GoID(ev.Args[0])] = struct{}{}
+			return Event{}, errSkip
+		}
+		mappedType = go122.EvGoCreate
+	case oldtrace.EvGoStart:
+		if it.preInit {
+			mappedType = go122.EvGoStatus
+			mappedArgs = timedEventArgs{ev.Args[0], ^uint64(0), uint64(go122.GoRunning)}
+			delete(it.createdPreInit, GoID(ev.Args[0]))
+		} else {
+			mappedType = go122.EvGoStart
+		}
+	case oldtrace.EvGoStartLabel:
+		it.extra = []Event{{
+			ctx: schedCtx{
+				G: GoID(ev.G),
+				P: ProcID(ev.P),
+				M: it.procMs[ProcID(ev.P)],
+			},
+			table: it.evt,
+			base: baseEvent{
+				typ:  go122.EvGoLabel,
+				time: Time(ev.Ts),
+				args: timedEventArgs{ev.Args[2]},
+			},
+		}}
+		return Event{
+			ctx: schedCtx{
+				G: GoID(ev.G),
+				P: ProcID(ev.P),
+				M: it.procMs[ProcID(ev.P)],
+			},
+			table: it.evt,
+			base: baseEvent{
+				typ:  go122.EvGoStart,
+				time: Time(ev.Ts),
+				args: mappedArgs,
+			},
+		}, nil
+	case 
oldtrace.EvGoEnd: + mappedType = go122.EvGoDestroy + case oldtrace.EvGoStop: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sForever]), uint64(ev.StkID)} + case oldtrace.EvGoSched: + mappedType = go122.EvGoStop + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sGosched]), uint64(ev.StkID)} + case oldtrace.EvGoPreempt: + mappedType = go122.EvGoStop + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sPreempted]), uint64(ev.StkID)} + case oldtrace.EvGoSleep: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sSleep]), uint64(ev.StkID)} + case oldtrace.EvGoBlock: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sEmpty]), uint64(ev.StkID)} + case oldtrace.EvGoUnblock: + mappedType = go122.EvGoUnblock + case oldtrace.EvGoBlockSend: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sChanSend]), uint64(ev.StkID)} + case oldtrace.EvGoBlockRecv: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sChanRecv]), uint64(ev.StkID)} + case oldtrace.EvGoBlockSelect: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sSelect]), uint64(ev.StkID)} + case oldtrace.EvGoBlockSync: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sSync]), uint64(ev.StkID)} + case oldtrace.EvGoBlockCond: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sSyncCond]), uint64(ev.StkID)} + case oldtrace.EvGoBlockNet: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sNetwork]), uint64(ev.StkID)} + case oldtrace.EvGoBlockGC: + mappedType = go122.EvGoBlock + mappedArgs = timedEventArgs{uint64(it.builtinToStringID[sMarkAssistWait]), uint64(ev.StkID)} + case oldtrace.EvGoSysCall: + // Look for the next event for the same G to determine if the syscall + // blocked. + blocked := false + it.events.All()(func(nev *oldtrace.Event) bool { + if nev.G != ev.G { + return true + } + // After an EvGoSysCall, the next event on the same G will either be + // EvGoSysBlock to denote a blocking syscall, or some other event + // (or the end of the trace) if the syscall didn't block. + if nev.Type == oldtrace.EvGoSysBlock { + blocked = true + } + return false + }) + if blocked { + mappedType = go122.EvGoSyscallBegin + mappedArgs = timedEventArgs{1: uint64(ev.StkID)} + } else { + // Convert the old instantaneous syscall event to a pair of syscall + // begin and syscall end and give it the shortest possible duration, + // 1ns. 
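+ // The synthetic begin event is returned directly; the matching end
+ // event (1ns later) is stashed in it.extra so that next() emits it
+ // immediately afterwards.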
+ out1 := Event{ + ctx: schedCtx{ + G: GoID(ev.G), + P: ProcID(ev.P), + M: it.procMs[ProcID(ev.P)], + }, + table: it.evt, + base: baseEvent{ + typ: go122.EvGoSyscallBegin, + time: Time(ev.Ts), + args: timedEventArgs{1: uint64(ev.StkID)}, + }, + } + + out2 := Event{ + ctx: out1.ctx, + table: it.evt, + base: baseEvent{ + typ: go122.EvGoSyscallEnd, + time: Time(ev.Ts + 1), + args: timedEventArgs{}, + }, + } + + it.extra = append(it.extra, out2) + return out1, nil + } + + case oldtrace.EvGoSysExit: + mappedType = go122.EvGoSyscallEndBlocked + case oldtrace.EvGoSysBlock: + return Event{}, errSkip + case oldtrace.EvGoWaiting: + mappedType = go122.EvGoStatus + mappedArgs = timedEventArgs{ev.Args[0], ^uint64(0), uint64(go122.GoWaiting)} + delete(it.createdPreInit, GoID(ev.Args[0])) + case oldtrace.EvGoInSyscall: + mappedType = go122.EvGoStatus + // In the new tracer, GoStatus with GoSyscall knows what thread the + // syscall is on. In the old tracer, EvGoInSyscall doesn't contain that + // information and all we can do here is specify NoThread. + mappedArgs = timedEventArgs{ev.Args[0], ^uint64(0), uint64(go122.GoSyscall)} + delete(it.createdPreInit, GoID(ev.Args[0])) + case oldtrace.EvHeapAlloc: + mappedType = go122.EvHeapAlloc + case oldtrace.EvHeapGoal: + mappedType = go122.EvHeapGoal + case oldtrace.EvGCMarkAssistStart: + mappedType = go122.EvGCMarkAssistBegin + case oldtrace.EvGCMarkAssistDone: + mappedType = go122.EvGCMarkAssistEnd + case oldtrace.EvUserTaskCreate: + mappedType = go122.EvUserTaskBegin + parent := ev.Args[1] + if parent == 0 { + parent = uint64(NoTask) + } + mappedArgs = timedEventArgs{ev.Args[0], parent, ev.Args[2], uint64(ev.StkID)} + name, _ := it.evt.strings.get(stringID(ev.Args[2])) + it.tasks[TaskID(ev.Args[0])] = taskState{name: name, parentID: TaskID(ev.Args[1])} + case oldtrace.EvUserTaskEnd: + mappedType = go122.EvUserTaskEnd + // Event.Task expects the parent and name to be smuggled in extra args + // and as extra strings. + ts, ok := it.tasks[TaskID(ev.Args[0])] + if ok { + delete(it.tasks, TaskID(ev.Args[0])) + mappedArgs = timedEventArgs{ + ev.Args[0], + ev.Args[1], + uint64(ts.parentID), + uint64(it.evt.addExtraString(ts.name)), + } + } else { + mappedArgs = timedEventArgs{ev.Args[0], ev.Args[1], uint64(NoTask), uint64(it.evt.addExtraString(""))} + } + case oldtrace.EvUserRegion: + switch ev.Args[1] { + case 0: // start + mappedType = go122.EvUserRegionBegin + case 1: // end + mappedType = go122.EvUserRegionEnd + } + mappedArgs = timedEventArgs{ev.Args[0], ev.Args[2], uint64(ev.StkID)} + case oldtrace.EvUserLog: + mappedType = go122.EvUserLog + mappedArgs = timedEventArgs{ev.Args[0], ev.Args[1], it.inlineToStringID[ev.Args[3]], uint64(ev.StkID)} + case oldtrace.EvCPUSample: + mappedType = go122.EvCPUSample + // When emitted by the Go 1.22 tracer, CPU samples have 5 arguments: + // timestamp, M, P, G, stack. However, after they get turned into Event, + // they have the arguments stack, M, P, G. + // + // In Go 1.21, CPU samples did not have Ms. 
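+ // Hence ^uint64(0) (no thread) in the M slot below; only the stack,
+ // P, and G survive the conversion.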
+ mappedArgs = timedEventArgs{uint64(ev.StkID), ^uint64(0), uint64(ev.P), ev.G} + default: + return Event{}, fmt.Errorf("unexpected event type %v", ev.Type) + } + + if oldtrace.EventDescriptions[ev.Type].Stack { + if stackIDs := go122.Specs()[mappedType].StackIDs; len(stackIDs) > 0 { + mappedArgs[stackIDs[0]-1] = uint64(ev.StkID) + } + } + + m := NoThread + if ev.P != -1 && ev.Type != oldtrace.EvCPUSample { + if t, ok := it.procMs[ProcID(ev.P)]; ok { + m = ThreadID(t) + } + } + if ev.Type == oldtrace.EvProcStop { + delete(it.procMs, ProcID(ev.P)) + } + g := GoID(ev.G) + if g == 0 { + g = NoGoroutine + } + out := Event{ + ctx: schedCtx{ + G: GoID(g), + P: ProcID(ev.P), + M: m, + }, + table: it.evt, + base: baseEvent{ + typ: mappedType, + time: Time(ev.Ts), + args: mappedArgs, + }, + } + return out, nil +} + +// convertOldFormat takes a fully loaded trace in the old trace format and +// returns an iterator over events in the new format. +func convertOldFormat(pr oldtrace.Trace) *oldTraceConverter { + it := &oldTraceConverter{} + it.init(pr) + return it +} diff --git a/vendor/golang.org/x/exp/trace/order.go b/vendor/golang.org/x/exp/trace/order.go new file mode 100644 index 000000000000..604c4e5343d0 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/order.go @@ -0,0 +1,1403 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package trace + +import ( + "fmt" + "strings" + + "golang.org/x/exp/trace/internal/event" + "golang.org/x/exp/trace/internal/event/go122" + "golang.org/x/exp/trace/internal/version" +) + +// ordering emulates Go scheduler state for both validation and +// for putting events in the right order. +// +// The interface to ordering consists of two methods: Advance +// and Next. Advance is called to try and advance an event and +// add completed events to the ordering. Next is used to pick +// off events in the ordering. +type ordering struct { + gStates map[GoID]*gState + pStates map[ProcID]*pState // TODO: The keys are dense, so this can be a slice. + mStates map[ThreadID]*mState + activeTasks map[TaskID]taskState + gcSeq uint64 + gcState gcState + initialGen uint64 + queue queue[Event] +} + +// Advance checks if it's valid to proceed with ev which came from thread m. +// +// It assumes the gen value passed to it is monotonically increasing across calls. +// +// If any error is returned, then the trace is broken and trace parsing must cease. +// If it's not valid to advance with ev, but no error was encountered, the caller +// should attempt to advance with other candidate events from other threads. If the +// caller runs out of candidates, the trace is invalid. +// +// If this returns true, Next is guaranteed to return a complete event. However, +// multiple events may be added to the ordering, so the caller should (but is not +// required to) continue to call Next until it is exhausted. +func (o *ordering) Advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) (bool, error) { + if o.initialGen == 0 { + // Set the initial gen if necessary. + o.initialGen = gen + } + + var curCtx, newCtx schedCtx + curCtx.M = m + newCtx.M = m + + var ms *mState + if m == NoThread { + curCtx.P = NoProc + curCtx.G = NoGoroutine + newCtx = curCtx + } else { + // Pull out or create the mState for this event. 
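+ // Threads are never announced explicitly; the first event seen on an
+ // M lazily creates its state, with no P or goroutine bound yet.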
+ var ok bool + ms, ok = o.mStates[m] + if !ok { + ms = &mState{ + g: NoGoroutine, + p: NoProc, + } + o.mStates[m] = ms + } + curCtx.P = ms.p + curCtx.G = ms.g + newCtx = curCtx + } + + f := orderingDispatch[ev.typ] + if f == nil { + return false, fmt.Errorf("bad event type found while ordering: %v", ev.typ) + } + newCtx, ok, err := f(o, ev, evt, m, gen, curCtx) + if err == nil && ok && ms != nil { + // Update the mState for this event. + ms.p = newCtx.P + ms.g = newCtx.G + } + return ok, err +} + +type orderingHandleFunc func(o *ordering, ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) + +var orderingDispatch = [256]orderingHandleFunc{ + // Procs. + go122.EvProcsChange: (*ordering).advanceAnnotation, + go122.EvProcStart: (*ordering).advanceProcStart, + go122.EvProcStop: (*ordering).advanceProcStop, + go122.EvProcSteal: (*ordering).advanceProcSteal, + go122.EvProcStatus: (*ordering).advanceProcStatus, + + // Goroutines. + go122.EvGoCreate: (*ordering).advanceGoCreate, + go122.EvGoCreateSyscall: (*ordering).advanceGoCreateSyscall, + go122.EvGoStart: (*ordering).advanceGoStart, + go122.EvGoDestroy: (*ordering).advanceGoStopExec, + go122.EvGoDestroySyscall: (*ordering).advanceGoDestroySyscall, + go122.EvGoStop: (*ordering).advanceGoStopExec, + go122.EvGoBlock: (*ordering).advanceGoStopExec, + go122.EvGoUnblock: (*ordering).advanceGoUnblock, + go122.EvGoSyscallBegin: (*ordering).advanceGoSyscallBegin, + go122.EvGoSyscallEnd: (*ordering).advanceGoSyscallEnd, + go122.EvGoSyscallEndBlocked: (*ordering).advanceGoSyscallEndBlocked, + go122.EvGoStatus: (*ordering).advanceGoStatus, + + // STW. + go122.EvSTWBegin: (*ordering).advanceGoRangeBegin, + go122.EvSTWEnd: (*ordering).advanceGoRangeEnd, + + // GC events. + go122.EvGCActive: (*ordering).advanceGCActive, + go122.EvGCBegin: (*ordering).advanceGCBegin, + go122.EvGCEnd: (*ordering).advanceGCEnd, + go122.EvGCSweepActive: (*ordering).advanceGCSweepActive, + go122.EvGCSweepBegin: (*ordering).advanceGCSweepBegin, + go122.EvGCSweepEnd: (*ordering).advanceGCSweepEnd, + go122.EvGCMarkAssistActive: (*ordering).advanceGoRangeActive, + go122.EvGCMarkAssistBegin: (*ordering).advanceGoRangeBegin, + go122.EvGCMarkAssistEnd: (*ordering).advanceGoRangeEnd, + go122.EvHeapAlloc: (*ordering).advanceHeapMetric, + go122.EvHeapGoal: (*ordering).advanceHeapMetric, + + // Annotations. + go122.EvGoLabel: (*ordering).advanceAnnotation, + go122.EvUserTaskBegin: (*ordering).advanceUserTaskBegin, + go122.EvUserTaskEnd: (*ordering).advanceUserTaskEnd, + go122.EvUserRegionBegin: (*ordering).advanceUserRegionBegin, + go122.EvUserRegionEnd: (*ordering).advanceUserRegionEnd, + go122.EvUserLog: (*ordering).advanceAnnotation, + + // Coroutines. Added in Go 1.23. + go122.EvGoSwitch: (*ordering).advanceGoSwitch, + go122.EvGoSwitchDestroy: (*ordering).advanceGoSwitch, + go122.EvGoCreateBlocked: (*ordering).advanceGoCreate, + + // GoStatus event with a stack. Added in Go 1.23. + go122.EvGoStatusStack: (*ordering).advanceGoStatus, + + // Experimental events. + + // Experimental heap span events. Added in Go 1.23. + go122.EvSpan: (*ordering).advanceAllocFree, + go122.EvSpanAlloc: (*ordering).advanceAllocFree, + go122.EvSpanFree: (*ordering).advanceAllocFree, + + // Experimental heap object events. Added in Go 1.23. + go122.EvHeapObject: (*ordering).advanceAllocFree, + go122.EvHeapObjectAlloc: (*ordering).advanceAllocFree, + go122.EvHeapObjectFree: (*ordering).advanceAllocFree, + + // Experimental goroutine stack events. Added in Go 1.23. 
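+ // (These funnel through advanceAllocFree as well, like the span and
+ // heap object events above.)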
+ go122.EvGoroutineStack: (*ordering).advanceAllocFree, + go122.EvGoroutineStackAlloc: (*ordering).advanceAllocFree, + go122.EvGoroutineStackFree: (*ordering).advanceAllocFree, +} + +func (o *ordering) advanceProcStatus(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + pid := ProcID(ev.args[0]) + status := go122.ProcStatus(ev.args[1]) + if int(status) >= len(go122ProcStatus2ProcState) { + return curCtx, false, fmt.Errorf("invalid status for proc %d: %d", pid, status) + } + oldState := go122ProcStatus2ProcState[status] + if s, ok := o.pStates[pid]; ok { + if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscall { + // ProcSyscallAbandoned is a special case of ProcSyscall. It indicates a + // potential loss of information, but if we're already in ProcSyscall, + // we haven't lost the relevant information. Promote the status and advance. + oldState = ProcRunning + ev.args[1] = uint64(go122.ProcSyscall) + } else if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscallAbandoned { + // If we're passing through ProcSyscallAbandoned, then there's no promotion + // to do. We've lost the M that this P is associated with. However it got there, + // it's going to appear as idle in the API, so pass through as idle. + oldState = ProcIdle + ev.args[1] = uint64(go122.ProcSyscallAbandoned) + } else if s.status != status { + return curCtx, false, fmt.Errorf("inconsistent status for proc %d: old %v vs. new %v", pid, s.status, status) + } + s.seq = makeSeq(gen, 0) // Reset seq. + } else { + o.pStates[pid] = &pState{id: pid, status: status, seq: makeSeq(gen, 0)} + if gen == o.initialGen { + oldState = ProcUndetermined + } else { + oldState = ProcNotExist + } + } + ev.extra(version.Go122)[0] = uint64(oldState) // Smuggle in the old state for StateTransition. + + // Bind the proc to the new context, if it's running. + newCtx := curCtx + if status == go122.ProcRunning || status == go122.ProcSyscall { + newCtx.P = pid + } + // If we're advancing through ProcSyscallAbandoned *but* oldState is running then we've + // promoted it to ProcSyscall. However, because it's ProcSyscallAbandoned, we know this + // P is about to get stolen and its status very likely isn't being emitted by the same + // thread it was bound to. Since this status is Running -> Running and Running is binding, + // we need to make sure we emit it in the right context: the context to which it is bound. + // Find it, and set our current context to it. + if status == go122.ProcSyscallAbandoned && oldState == ProcRunning { + // N.B. This is slow but it should be fairly rare. + found := false + for mid, ms := range o.mStates { + if ms.p == pid { + curCtx.M = mid + curCtx.P = pid + curCtx.G = ms.g + found = true + } + } + if !found { + return curCtx, false, fmt.Errorf("failed to find sched context for proc %d that's about to be stolen", pid) + } + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceProcStart(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + pid := ProcID(ev.args[0]) + seq := makeSeq(gen, ev.args[1]) + + // Try to advance. We might fail here due to sequencing, because the P hasn't + // had a status emitted, or because we already have a P and we're in a syscall, + // and we haven't observed that it was stolen from us yet. 
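+ // To advance, the P must be known, idle, and next in sequence, and
+ // this M must not already hold a P.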
+ state, ok := o.pStates[pid] + if !ok || state.status != go122.ProcIdle || !seq.succeeds(state.seq) || curCtx.P != NoProc { + // We can't make an inference as to whether this is bad. We could just be seeing + // a ProcStart on a different M before the proc's state was emitted, or before we + // got to the right point in the trace. + // + // Note that we also don't advance here if we have a P and we're in a syscall. + return curCtx, false, nil + } + // We can advance this P. Check some invariants. + // + // We might have a goroutine if a goroutine is exiting a syscall. + reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustNotHave, Goroutine: event.MayHave} + if err := validateCtx(curCtx, reqs); err != nil { + return curCtx, false, err + } + state.status = go122.ProcRunning + state.seq = seq + newCtx := curCtx + newCtx.P = pid + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceProcStop(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // We must be able to advance this P. + // + // There are 2 ways a P can stop: ProcStop and ProcSteal. ProcStop is used when the P + // is stopped by the same M that started it, while ProcSteal is used when another M + // steals the P by stopping it from a distance. + // + // Since a P is bound to an M, and we're stopping on the same M we started, it must + // always be possible to advance the current M's P from a ProcStop. This is also why + // ProcStop doesn't need a sequence number. + state, ok := o.pStates[curCtx.P] + if !ok { + return curCtx, false, fmt.Errorf("event %s for proc (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.P) + } + if state.status != go122.ProcRunning && state.status != go122.ProcSyscall { + return curCtx, false, fmt.Errorf("%s event for proc that's not %s or %s", go122.EventString(ev.typ), go122.ProcRunning, go122.ProcSyscall) + } + reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave} + if err := validateCtx(curCtx, reqs); err != nil { + return curCtx, false, err + } + state.status = go122.ProcIdle + newCtx := curCtx + newCtx.P = NoProc + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceProcSteal(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + pid := ProcID(ev.args[0]) + seq := makeSeq(gen, ev.args[1]) + state, ok := o.pStates[pid] + if !ok || (state.status != go122.ProcSyscall && state.status != go122.ProcSyscallAbandoned) || !seq.succeeds(state.seq) { + // We can't make an inference as to whether this is bad. We could just be seeing + // a ProcStart on a different M before the proc's state was emitted, or before we + // got to the right point in the trace. + return curCtx, false, nil + } + // We can advance this P. Check some invariants. + reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MayHave} + if err := validateCtx(curCtx, reqs); err != nil { + return curCtx, false, err + } + // Smuggle in the P state that let us advance so we can surface information to the event. + // Specifically, we need to make sure that the event is interpreted not as a transition of + // ProcRunning -> ProcIdle but ProcIdle -> ProcIdle instead. + // + // ProcRunning is binding, but we may be running with a P on the current M and we can't + // bind another P. This P is about to go ProcIdle anyway. 
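+ // Record the pre-steal status; it determines below whether the stolen
+ // P's former M can be touched at all.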
+ oldStatus := state.status + ev.extra(version.Go122)[0] = uint64(oldStatus) + + // Update the P's status and sequence number. + state.status = go122.ProcIdle + state.seq = seq + + // If we've lost information then don't try to do anything with the M. + // It may have moved on and we can't be sure. + if oldStatus == go122.ProcSyscallAbandoned { + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil + } + + // Validate that the M we're stealing from is what we expect. + mid := ThreadID(ev.args[2]) // The M we're stealing from. + + newCtx := curCtx + if mid == curCtx.M { + // We're stealing from ourselves. This behaves like a ProcStop. + if curCtx.P != pid { + return curCtx, false, fmt.Errorf("tried to self-steal proc %d (thread %d), but got proc %d instead", pid, mid, curCtx.P) + } + newCtx.P = NoProc + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil + } + + // We're stealing from some other M. + mState, ok := o.mStates[mid] + if !ok { + return curCtx, false, fmt.Errorf("stole proc from non-existent thread %d", mid) + } + + // Make sure we're actually stealing the right P. + if mState.p != pid { + return curCtx, false, fmt.Errorf("tried to steal proc %d from thread %d, but got proc %d instead", pid, mid, mState.p) + } + + // Tell the M it has no P so it can proceed. + // + // This is safe because we know the P was in a syscall and + // the other M must be trying to get out of the syscall. + // GoSyscallEndBlocked cannot advance until the corresponding + // M loses its P. + mState.p = NoProc + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceGoStatus(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + gid := GoID(ev.args[0]) + mid := ThreadID(ev.args[1]) + status := go122.GoStatus(ev.args[2]) + + if int(status) >= len(go122GoStatus2GoState) { + return curCtx, false, fmt.Errorf("invalid status for goroutine %d: %d", gid, status) + } + oldState := go122GoStatus2GoState[status] + if s, ok := o.gStates[gid]; ok { + if s.status != status { + return curCtx, false, fmt.Errorf("inconsistent status for goroutine %d: old %v vs. new %v", gid, s.status, status) + } + s.seq = makeSeq(gen, 0) // Reset seq. + } else if gen == o.initialGen { + // Set the state. + o.gStates[gid] = &gState{id: gid, status: status, seq: makeSeq(gen, 0)} + oldState = GoUndetermined + } else { + return curCtx, false, fmt.Errorf("found goroutine status for new goroutine after the first generation: id=%v status=%v", gid, status) + } + ev.extra(version.Go122)[0] = uint64(oldState) // Smuggle in the old state for StateTransition. + + newCtx := curCtx + switch status { + case go122.GoRunning: + // Bind the goroutine to the new context, since it's running. + newCtx.G = gid + case go122.GoSyscall: + if mid == NoThread { + return curCtx, false, fmt.Errorf("found goroutine %d in syscall without a thread", gid) + } + // Is the syscall on this thread? If so, bind it to the context. + // Otherwise, we're talking about a G sitting in a syscall on an M. + // Validate the named M. + if mid == curCtx.M { + if gen != o.initialGen && curCtx.G != gid { + // If this isn't the first generation, we *must* have seen this + // binding occur already. Even if the G was blocked in a syscall + // for multiple generations since trace start, we would have seen + // a previous GoStatus event that bound the goroutine to an M. 
+ return curCtx, false, fmt.Errorf("inconsistent thread for syscalling goroutine %d: thread has goroutine %d", gid, curCtx.G) + } + newCtx.G = gid + break + } + // Now we're talking about a thread and goroutine that have been + // blocked on a syscall for the entire generation. This case must + // not have a P; the runtime makes sure that all Ps are traced at + // the beginning of a generation, which involves taking a P back + // from every thread. + ms, ok := o.mStates[mid] + if ok { + // This M has been seen. That means we must have seen this + // goroutine go into a syscall on this thread at some point. + if ms.g != gid { + // But the G on the M doesn't match. Something's wrong. + return curCtx, false, fmt.Errorf("inconsistent thread for syscalling goroutine %d: thread has goroutine %d", gid, ms.g) + } + // This case is just a Syscall->Syscall event, which needs to + // appear as having the G currently bound to this M. + curCtx.G = ms.g + } else if !ok { + // The M hasn't been seen yet. That means this goroutine + // has just been sitting in a syscall on this M. Create + // a state for it. + o.mStates[mid] = &mState{g: gid, p: NoProc} + // Don't set curCtx.G in this case because this event is the + // binding event (and curCtx represents the "before" state). + } + // Update the current context to the M we're talking about. + curCtx.M = mid + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceGoCreate(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Goroutines must be created on a running P, but may or may not be created + // by a running goroutine. + reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave} + if err := validateCtx(curCtx, reqs); err != nil { + return curCtx, false, err + } + // If we have a goroutine, it must be running. + if state, ok := o.gStates[curCtx.G]; ok && state.status != go122.GoRunning { + return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoRunning) + } + // This goroutine created another. Add a state for it. + newgid := GoID(ev.args[0]) + if _, ok := o.gStates[newgid]; ok { + return curCtx, false, fmt.Errorf("tried to create goroutine (%v) that already exists", newgid) + } + status := go122.GoRunnable + if ev.typ == go122.EvGoCreateBlocked { + status = go122.GoWaiting + } + o.gStates[newgid] = &gState{id: newgid, status: status, seq: makeSeq(gen, 0)} + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGoStopExec(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // These are goroutine events that all require an active running + // goroutine on some thread. They must *always* be advance-able, + // since running goroutines are bound to their M. + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + state, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G) + } + if state.status != go122.GoRunning { + return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoRunning) + } + // Handle each case slightly differently; we just group them together + // because they have shared preconditions. 
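+ // All three cases unbind the goroutine from this M; they differ only
+ // in the goroutine's resulting state (destroyed, runnable, or waiting).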
+ newCtx := curCtx + switch ev.typ { + case go122.EvGoDestroy: + // This goroutine is exiting itself. + delete(o.gStates, curCtx.G) + newCtx.G = NoGoroutine + case go122.EvGoStop: + // Goroutine stopped (yielded). It's runnable but not running on this M. + state.status = go122.GoRunnable + newCtx.G = NoGoroutine + case go122.EvGoBlock: + // Goroutine blocked. It's waiting now and not running on this M. + state.status = go122.GoWaiting + newCtx.G = NoGoroutine + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceGoStart(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + gid := GoID(ev.args[0]) + seq := makeSeq(gen, ev.args[1]) + state, ok := o.gStates[gid] + if !ok || state.status != go122.GoRunnable || !seq.succeeds(state.seq) { + // We can't make an inference as to whether this is bad. We could just be seeing + // a GoStart on a different M before the goroutine was created, before it had its + // state emitted, or before we got to the right point in the trace yet. + return curCtx, false, nil + } + // We can advance this goroutine. Check some invariants. + reqs := event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MustNotHave} + if err := validateCtx(curCtx, reqs); err != nil { + return curCtx, false, err + } + state.status = go122.GoRunning + state.seq = seq + newCtx := curCtx + newCtx.G = gid + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceGoUnblock(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // N.B. These both reference the goroutine to unblock, not the current goroutine. + gid := GoID(ev.args[0]) + seq := makeSeq(gen, ev.args[1]) + state, ok := o.gStates[gid] + if !ok || state.status != go122.GoWaiting || !seq.succeeds(state.seq) { + // We can't make an inference as to whether this is bad. We could just be seeing + // a GoUnblock on a different M before the goroutine was created and blocked itself, + // before it had its state emitted, or before we got to the right point in the trace yet. + return curCtx, false, nil + } + state.status = go122.GoRunnable + state.seq = seq + // N.B. No context to validate. Basically anything can unblock + // a goroutine (e.g. sysmon). + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGoSwitch(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // GoSwitch and GoSwitchDestroy represent a trio of events: + // - Unblock of the goroutine to switch to. + // - Block or destroy of the current goroutine. + // - Start executing the next goroutine. + // + // Because it acts like a GoStart for the next goroutine, we can + // only advance it if the sequence numbers line up. + // + // The current goroutine on the thread must be actively running. + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + curGState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G) + } + if curGState.status != go122.GoRunning { + return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoRunning) + } + nextg := GoID(ev.args[0]) + seq := makeSeq(gen, ev.args[1]) // seq is for nextg, not curCtx.G. 
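+ // Like a GoUnblock, nextg must be waiting with the immediate-successor
+ // sequence number; otherwise back off and let events from other
+ // threads advance first.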
+ nextGState, ok := o.gStates[nextg]
+ if !ok || nextGState.status != go122.GoWaiting || !seq.succeeds(nextGState.seq) {
+ // We can't make an inference as to whether this is bad. We could just be seeing
+ // a GoSwitch on a different M before the goroutine was created, before it had its
+ // state emitted, or before we got to the right point in the trace yet.
+ return curCtx, false, nil
+ }
+ o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
+
+ // Update the state of the executing goroutine and emit an event for it
+ // (GoSwitch and GoSwitchDestroy will be interpreted as GoUnblock events
+ // for nextg).
+ switch ev.typ {
+ case go122.EvGoSwitch:
+ // Goroutine blocked. It's waiting now and not running on this M.
+ curGState.status = go122.GoWaiting
+
+ // Emit a GoBlock event.
+ // TODO(mknyszek): Emit a reason.
+ o.queue.push(makeEvent(evt, curCtx, go122.EvGoBlock, ev.time, 0 /* no reason */, 0 /* no stack */))
+ case go122.EvGoSwitchDestroy:
+ // This goroutine is exiting itself.
+ delete(o.gStates, curCtx.G)
+
+ // Emit a GoDestroy event.
+ o.queue.push(makeEvent(evt, curCtx, go122.EvGoDestroy, ev.time))
+ }
+ // Update the state of the next goroutine.
+ nextGState.status = go122.GoRunning
+ nextGState.seq = seq
+ newCtx := curCtx
+ newCtx.G = nextg
+
+ // Queue an event for the next goroutine starting to run.
+ startCtx := curCtx
+ startCtx.G = NoGoroutine
+ o.queue.push(makeEvent(evt, startCtx, go122.EvGoStart, ev.time, uint64(nextg), ev.args[1]))
+ return newCtx, true, nil
+}
+
+func (o *ordering) advanceGoSyscallBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) {
+ // Entering a syscall requires an active running goroutine with a
+ // proc on some thread. It is always advance-able.
+ if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+ return curCtx, false, err
+ }
+ state, ok := o.gStates[curCtx.G]
+ if !ok {
+ return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G)
+ }
+ if state.status != go122.GoRunning {
+ return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoRunning)
+ }
+ // Goroutine entered a syscall. It's still running on this P and M.
+ state.status = go122.GoSyscall
+ pState, ok := o.pStates[curCtx.P]
+ if !ok {
+ return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(ev.typ))
+ }
+ pState.status = go122.ProcSyscall
+ // Validate the P sequence number on the event and advance it.
+ //
+ // We have a P sequence number for what is supposed to be a goroutine event
+ // so that we can correctly model P stealing. Without this sequence number here,
+ // the syscall from which a ProcSteal event is stealing can be ambiguous in the
+ // face of broken timestamps. See the go122-syscall-steal-proc-ambiguous test for
+ // more details.
+ //
+ // Note that because this sequence number only exists as a tool for disambiguation,
+ // we can enforce that we have the right sequence number at this point; we don't need
+ // to back off and see if any other events will advance. This is a running P. 
+ pSeq := makeSeq(gen, ev.args[0])
+ if !pSeq.succeeds(pState.seq) {
+ return curCtx, false, fmt.Errorf("failed to advance %s: can't make sequence: %s -> %s", go122.EventString(ev.typ), pState.seq, pSeq)
+ }
+ pState.seq = pSeq
+ o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
+ return curCtx, true, nil
+}
+
+func (o *ordering) advanceGoSyscallEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) {
+ // This event is always advance-able because it happens on the same
+ // thread that EvGoSyscallBegin happened on, and the goroutine can't leave
+ // that thread until it's done.
+ if err := validateCtx(curCtx, event.UserGoReqs); err != nil {
+ return curCtx, false, err
+ }
+ state, ok := o.gStates[curCtx.G]
+ if !ok {
+ return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G)
+ }
+ if state.status != go122.GoSyscall {
+ return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoSyscall)
+ }
+ state.status = go122.GoRunning
+
+ // Transfer the P back to running from syscall.
+ pState, ok := o.pStates[curCtx.P]
+ if !ok {
+ return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(ev.typ))
+ }
+ if pState.status != go122.ProcSyscall {
+ return curCtx, false, fmt.Errorf("expected proc %d in state %v, but got %v instead", curCtx.P, go122.ProcSyscall, pState.status)
+ }
+ pState.status = go122.ProcRunning
+ o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
+ return curCtx, true, nil
+}
+
+func (o *ordering) advanceGoSyscallEndBlocked(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) {
+ // This event becomes advance-able when its P is not in a syscall state
+ // (lack of a P altogether is also acceptable for advancing).
+ // The transfer out of ProcSyscall can happen either voluntarily via
+ // ProcStop or involuntarily via ProcSteal. We may also acquire a new P
+ // before we get here (after the transfer out) but that's OK: that new
+ // P won't be in the ProcSyscall state anymore.
+ //
+ // Basically: while we have a preemptible P, don't advance, because we
+ // *know* from the event that we're going to lose it at some point during
+ // the syscall. We shouldn't advance until that happens.
+ if curCtx.P != NoProc {
+ pState, ok := o.pStates[curCtx.P]
+ if !ok {
+ return curCtx, false, fmt.Errorf("uninitialized proc %d found during %s", curCtx.P, go122.EventString(ev.typ))
+ }
+ if pState.status == go122.ProcSyscall {
+ return curCtx, false, nil
+ }
+ }
+ // As mentioned above, we may have a P here if we ProcStart
+ // before this event. 
+ if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustHave}); err != nil {
+ return curCtx, false, err
+ }
+ state, ok := o.gStates[curCtx.G]
+ if !ok {
+ return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G)
+ }
+ if state.status != go122.GoSyscall {
+ return curCtx, false, fmt.Errorf("%s event for goroutine that's not %s", go122.EventString(ev.typ), GoSyscall)
+ }
+ newCtx := curCtx
+ newCtx.G = NoGoroutine
+ state.status = go122.GoRunnable
+ o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
+ return newCtx, true, nil
+}
+
+func (o *ordering) advanceGoCreateSyscall(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) {
+ // This event indicates that a goroutine is effectively
+ // being created out of a cgo callback. Such a goroutine
+ // is 'created' in the syscall state.
+ if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustNotHave}); err != nil {
+ return curCtx, false, err
+ }
+ // This goroutine is effectively being created. Add a state for it.
+ newgid := GoID(ev.args[0])
+ if _, ok := o.gStates[newgid]; ok {
+ return curCtx, false, fmt.Errorf("tried to create goroutine (%v) in syscall that already exists", newgid)
+ }
+ o.gStates[newgid] = &gState{id: newgid, status: go122.GoSyscall, seq: makeSeq(gen, 0)}
+ // Goroutine is executing. Bind it to the context.
+ newCtx := curCtx
+ newCtx.G = newgid
+ o.queue.push(Event{table: evt, ctx: curCtx, base: *ev})
+ return newCtx, true, nil
+}
+
+func (o *ordering) advanceGoDestroySyscall(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) {
+ // This event indicates that a goroutine created for a
+ // cgo callback is disappearing, either because the callback
+ // is ending or the C thread that called it is being destroyed.
+ //
+ // Also, treat this as if we lost our P too.
+ // The thread ID may be reused by the platform and we'll get
+ // really confused if we later try to steal the P this M is
+ // running with. The new M with the same ID could even try to
+ // steal back this P from itself!
+ //
+ // The runtime is careful to make sure that any GoCreateSyscall
+ // event will enter the runtime emitting events for reacquiring a P.
+ //
+ // Note: we might have a P here. The P might not be released
+ // eagerly by the runtime, and it might get stolen back later
+ // (or never again, if the program is going to exit).
+ if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustHave}); err != nil {
+ return curCtx, false, err
+ }
+ // Check to make sure the goroutine exists in the right state.
+ state, ok := o.gStates[curCtx.G]
+ if !ok {
+ return curCtx, false, fmt.Errorf("event %s for goroutine (%v) that doesn't exist", go122.EventString(ev.typ), curCtx.G)
+ }
+ if state.status != go122.GoSyscall {
+ return curCtx, false, fmt.Errorf("%s event for goroutine that's not %v", go122.EventString(ev.typ), GoSyscall)
+ }
+ // This goroutine is exiting itself.
+ delete(o.gStates, curCtx.G)
+ newCtx := curCtx
+ newCtx.G = NoGoroutine
+
+ // If we have a proc, then we're dissociating from it now. See the comment at the top of the case. 
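+ // The P is left in ProcSyscallAbandoned and a synthetic self-directed
+ // ProcSteal is queued so that consumers observe the P being taken away
+ // explicitly.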
+ if curCtx.P != NoProc { + pState, ok := o.pStates[curCtx.P] + if !ok { + return curCtx, false, fmt.Errorf("found invalid proc %d during %s", curCtx.P, go122.EventString(ev.typ)) + } + if pState.status != go122.ProcSyscall { + return curCtx, false, fmt.Errorf("proc %d in unexpected state %s during %s", curCtx.P, pState.status, go122.EventString(ev.typ)) + } + // See the go122-create-syscall-reuse-thread-id test case for more details. + pState.status = go122.ProcSyscallAbandoned + newCtx.P = NoProc + + // Queue an extra self-ProcSteal event. + extra := makeEvent(evt, curCtx, go122.EvProcSteal, ev.time, uint64(curCtx.P)) + extra.base.extra(version.Go122)[0] = uint64(go122.ProcSyscall) + o.queue.push(extra) + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return newCtx, true, nil +} + +func (o *ordering) advanceUserTaskBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle tasks. Tasks are interesting because: + // - There's no Begin event required to reference a task. + // - End for a particular task ID can appear multiple times. + // As a result, there's very little to validate. The only + // thing we have to be sure of is that a task didn't begin + // after it had already begun. Task IDs are allowed to be + // reused, so we don't care about a Begin after an End. + id := TaskID(ev.args[0]) + if _, ok := o.activeTasks[id]; ok { + return curCtx, false, fmt.Errorf("task ID conflict: %d", id) + } + // Get the parent ID, but don't validate it. There's no guarantee + // we actually have information on whether it's active. + parentID := TaskID(ev.args[1]) + if parentID == BackgroundTask { + // Note: a value of 0 here actually means no parent, *not* the + // background task. Automatic background task attachment only + // applies to regions. + parentID = NoTask + ev.args[1] = uint64(NoTask) + } + + // Validate the name and record it. We'll need to pass it through to + // EvUserTaskEnd. + nameID := stringID(ev.args[2]) + name, ok := evt.strings.get(nameID) + if !ok { + return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, ev.typ) + } + o.activeTasks[id] = taskState{name: name, parentID: parentID} + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceUserTaskEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + id := TaskID(ev.args[0]) + if ts, ok := o.activeTasks[id]; ok { + // Smuggle the task info. This may happen in a different generation, + // which may not have the name in its string table. Add it to the extra + // strings table so we can look it up later. + ev.extra(version.Go122)[0] = uint64(ts.parentID) + ev.extra(version.Go122)[1] = uint64(evt.addExtraString(ts.name)) + delete(o.activeTasks, id) + } else { + // Explicitly clear the task info. 
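+ // (No begin event was seen for this task, e.g. because it was created
+ // before tracing started, so its name and parent are unknown.)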
+ ev.extra(version.Go122)[0] = uint64(NoTask) + ev.extra(version.Go122)[1] = uint64(evt.addExtraString("")) + } + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceUserRegionBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + tid := TaskID(ev.args[0]) + nameID := stringID(ev.args[1]) + name, ok := evt.strings.get(nameID) + if !ok { + return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, ev.typ) + } + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered EvUserRegionBegin without known state for current goroutine %d", curCtx.G) + } + if err := gState.beginRegion(userRegion{tid, name}); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceUserRegionEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + tid := TaskID(ev.args[0]) + nameID := stringID(ev.args[1]) + name, ok := evt.strings.get(nameID) + if !ok { + return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, ev.typ) + } + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered EvUserRegionEnd without known state for current goroutine %d", curCtx.G) + } + if err := gState.endRegion(userRegion{tid, name}); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +// Handle the GC mark phase. +// +// We have sequence numbers for both start and end because they +// can happen on completely different threads. We want an explicit +// partial order edge between start and end here, otherwise we're +// relying entirely on timestamps to make sure we don't advance a +// GCEnd for a _different_ GC cycle if timestamps are wildly broken. +func (o *ordering) advanceGCActive(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + seq := ev.args[0] + if gen == o.initialGen { + if o.gcState != gcUndetermined { + return curCtx, false, fmt.Errorf("GCActive in the first generation isn't first GC event") + } + o.gcSeq = seq + o.gcState = gcRunning + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil + } + if seq != o.gcSeq+1 { + // This is not the right GC cycle. + return curCtx, false, nil + } + if o.gcState != gcRunning { + return curCtx, false, fmt.Errorf("encountered GCActive while GC was not in progress") + } + o.gcSeq = seq + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGCBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + seq := ev.args[0] + if o.gcState == gcUndetermined { + o.gcSeq = seq + o.gcState = gcRunning + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil + } + if seq != o.gcSeq+1 { + // This is not the right GC cycle. 
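+ // Don't error out: an event from another thread may need to advance
+ // first to bring gcSeq up to date.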
+ return curCtx, false, nil + } + if o.gcState == gcRunning { + return curCtx, false, fmt.Errorf("encountered GCBegin while GC was already in progress") + } + o.gcSeq = seq + o.gcState = gcRunning + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGCEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + seq := ev.args[0] + if seq != o.gcSeq+1 { + // This is not the right GC cycle. + return curCtx, false, nil + } + if o.gcState == gcNotRunning { + return curCtx, false, fmt.Errorf("encountered GCEnd when GC was not in progress") + } + if o.gcState == gcUndetermined { + return curCtx, false, fmt.Errorf("encountered GCEnd when GC was in an undetermined state") + } + o.gcSeq = seq + o.gcState = gcNotRunning + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceAnnotation(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle simple instantaneous events that require a G. + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceHeapMetric(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle allocation metrics, which don't require a G. + if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGCSweepBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle sweep, which is bound to a P and doesn't require a G. + if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil { + return curCtx, false, err + } + if err := o.pStates[curCtx.P].beginRange(makeRangeType(ev.typ, 0)); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGCSweepActive(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + pid := ProcID(ev.args[0]) + // N.B. In practice Ps can't block while they're sweeping, so this can only + // ever reference curCtx.P. However, be lenient about this like we are with + // GCMarkAssistActive; there's no reason the runtime couldn't change to block + // in the middle of a sweep. 
+ pState, ok := o.pStates[pid] + if !ok { + return curCtx, false, fmt.Errorf("encountered GCSweepActive for unknown proc %d", pid) + } + if err := pState.activeRange(makeRangeType(ev.typ, 0), gen == o.initialGen); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGCSweepEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustHave, Goroutine: event.MayHave}); err != nil { + return curCtx, false, err + } + _, err := o.pStates[curCtx.P].endRange(ev.typ) + if err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGoRangeBegin(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle special goroutine-bound event ranges. + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + desc := stringID(0) + if ev.typ == go122.EvSTWBegin { + desc = stringID(ev.args[0]) + } + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", ev.typ, curCtx.G) + } + if err := gState.beginRange(makeRangeType(ev.typ, desc)); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGoRangeActive(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + gid := GoID(ev.args[0]) + // N.B. Like GoStatus, this can happen at any time, because it can + // reference a non-running goroutine. Don't check anything about the + // current scheduler context. + gState, ok := o.gStates[gid] + if !ok { + return curCtx, false, fmt.Errorf("uninitialized goroutine %d found during %s", gid, go122.EventString(ev.typ)) + } + if err := gState.activeRange(makeRangeType(ev.typ, 0), gen == o.initialGen); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceGoRangeEnd(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + if err := validateCtx(curCtx, event.UserGoReqs); err != nil { + return curCtx, false, err + } + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", ev.typ, curCtx.G) + } + desc, err := gState.endRange(ev.typ) + if err != nil { + return curCtx, false, err + } + if ev.typ == go122.EvSTWEnd { + // Smuggle the kind into the event. + // Don't use ev.extra here so we have symmetry with STWBegin. + ev.args[0] = uint64(desc) + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +func (o *ordering) advanceAllocFree(ev *baseEvent, evt *evTable, m ThreadID, gen uint64, curCtx schedCtx) (schedCtx, bool, error) { + // Handle simple instantaneous events that may or may not have a P. 
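+ // Nothing to update in the scheduler state; just validate the context
+ // and queue the event.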
+ if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MayHave}); err != nil { + return curCtx, false, err + } + o.queue.push(Event{table: evt, ctx: curCtx, base: *ev}) + return curCtx, true, nil +} + +// Next returns the next event in the ordering. +func (o *ordering) Next() (Event, bool) { + return o.queue.pop() +} + +// schedCtx represents the scheduling resources associated with an event. +type schedCtx struct { + G GoID + P ProcID + M ThreadID +} + +// validateCtx ensures that ctx conforms to some reqs, returning an error if +// it doesn't. +func validateCtx(ctx schedCtx, reqs event.SchedReqs) error { + // Check thread requirements. + if reqs.Thread == event.MustHave && ctx.M == NoThread { + return fmt.Errorf("expected a thread but didn't have one") + } else if reqs.Thread == event.MustNotHave && ctx.M != NoThread { + return fmt.Errorf("expected no thread but had one") + } + + // Check proc requirements. + if reqs.Proc == event.MustHave && ctx.P == NoProc { + return fmt.Errorf("expected a proc but didn't have one") + } else if reqs.Proc == event.MustNotHave && ctx.P != NoProc { + return fmt.Errorf("expected no proc but had one") + } + + // Check goroutine requirements. + if reqs.Goroutine == event.MustHave && ctx.G == NoGoroutine { + return fmt.Errorf("expected a goroutine but didn't have one") + } else if reqs.Goroutine == event.MustNotHave && ctx.G != NoGoroutine { + return fmt.Errorf("expected no goroutine but had one") + } + return nil +} + +// gcState is a trinary variable for the current state of the GC. +// +// The third state besides "enabled" and "disabled" is "undetermined." +type gcState uint8 + +const ( + gcUndetermined gcState = iota + gcNotRunning + gcRunning +) + +// String returns a human-readable string for the GC state. +func (s gcState) String() string { + switch s { + case gcUndetermined: + return "Undetermined" + case gcNotRunning: + return "NotRunning" + case gcRunning: + return "Running" + } + return "Bad" +} + +// userRegion represents a unique user region when attached to some gState. +type userRegion struct { + // name must be a resolved string because the string ID for the same + // string may change across generations, but we care about checking + // the value itself. + taskID TaskID + name string +} + +// rangeType is a way to classify special ranges of time. +// +// These typically correspond 1:1 with "Begin" events, but +// they may have an optional subtype that describes the range +// in more detail. +type rangeType struct { + typ event.Type // "Begin" event. + desc stringID // Optional subtype. +} + +// makeRangeType constructs a new rangeType. +func makeRangeType(typ event.Type, desc stringID) rangeType { + if styp := go122.Specs()[typ].StartEv; styp != go122.EvNone { + typ = styp + } + return rangeType{typ, desc} +} + +// gState is the state of a goroutine at a point in the trace. +type gState struct { + id GoID + status go122.GoStatus + seq seqCounter + + // regions are the active user regions for this goroutine. + regions []userRegion + + // rangeState is the state of special time ranges bound to this goroutine. + rangeState +} + +// beginRegion starts a user region on the goroutine. +func (s *gState) beginRegion(r userRegion) error { + s.regions = append(s.regions, r) + return nil +} + +// endRegion ends a user region on the goroutine. +func (s *gState) endRegion(r userRegion) error { + if len(s.regions) == 0 { + // We do not know about regions that began before tracing started. 
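+ // Tolerate the unmatched end rather than reporting an error.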
+ return nil
+ }
+ if next := s.regions[len(s.regions)-1]; next != r {
+ return fmt.Errorf("misuse of region in goroutine %v: region end %v when the inner-most active region start event is %v", s.id, r, next)
+ }
+ s.regions = s.regions[:len(s.regions)-1]
+ return nil
+}
+
+// pState is the state of a proc at a point in the trace.
+type pState struct {
+ id ProcID
+ status go122.ProcStatus
+ seq seqCounter
+
+ // rangeState is the state of special time ranges bound to this proc.
+ rangeState
+}
+
+// mState is the state of a thread at a point in the trace.
+type mState struct {
+ g GoID // Goroutine bound to this M. (The goroutine's state is Executing.)
+ p ProcID // Proc bound to this M. (The proc's state is Executing.)
+}
+
+// rangeState represents the state of special time ranges.
+type rangeState struct {
+ // inFlight contains the rangeTypes of any ranges bound to a resource.
+ inFlight []rangeType
+}
+
+// beginRange begins a special range in time on the goroutine.
+//
+// Returns an error if the range is already in progress.
+func (s *rangeState) beginRange(typ rangeType) error {
+ if s.hasRange(typ) {
+ return fmt.Errorf("discovered event already in-flight when starting event %v", go122.Specs()[typ.typ].Name)
+ }
+ s.inFlight = append(s.inFlight, typ)
+ return nil
+}
+
+// activeRange marks a special range in time on the goroutine as active in the
+// initial generation, or confirms that it is indeed active in later generations.
+func (s *rangeState) activeRange(typ rangeType, isInitialGen bool) error {
+ if isInitialGen {
+ if s.hasRange(typ) {
+ return fmt.Errorf("found named active range already in first gen: %v", typ)
+ }
+ s.inFlight = append(s.inFlight, typ)
+ } else if !s.hasRange(typ) {
+ return fmt.Errorf("resource is missing active range: %v %v", go122.Specs()[typ.typ].Name, s.inFlight)
+ }
+ return nil
+}
+
+// hasRange returns true if a special time range on the goroutine is in progress.
+func (s *rangeState) hasRange(typ rangeType) bool {
+ for _, ftyp := range s.inFlight {
+ if ftyp == typ {
+ return true
+ }
+ }
+ return false
+}
+
+// endRange ends a special range in time on the goroutine.
+//
+// This must line up with the start event type of the range the goroutine is currently in.
+func (s *rangeState) endRange(typ event.Type) (stringID, error) {
+ st := go122.Specs()[typ].StartEv
+ idx := -1
+ for i, r := range s.inFlight {
+ if r.typ == st {
+ idx = i
+ break
+ }
+ }
+ if idx < 0 {
+ return 0, fmt.Errorf("tried to end event %v, but not in-flight", go122.Specs()[st].Name)
+ }
+ // Swap remove.
+ desc := s.inFlight[idx].desc
+ s.inFlight[idx], s.inFlight[len(s.inFlight)-1] = s.inFlight[len(s.inFlight)-1], s.inFlight[idx]
+ s.inFlight = s.inFlight[:len(s.inFlight)-1]
+ return desc, nil
+}
+
+// seqCounter represents a global sequence counter for a resource.
+type seqCounter struct {
+ gen uint64 // The generation for the local sequence counter seq.
+ seq uint64 // The sequence number local to the generation.
+}
+
+// makeSeq creates a new seqCounter.
+func makeSeq(gen, seq uint64) seqCounter {
+ return seqCounter{gen: gen, seq: seq}
+}
+
+// succeeds returns true if a is the immediate successor of b.
+func (a seqCounter) succeeds(b seqCounter) bool {
+ return a.gen == b.gen && a.seq == b.seq+1
+}
+
+// String returns a debug string representation of the seqCounter. 
+func (c seqCounter) String() string {
+ return fmt.Sprintf("%d (gen=%d)", c.seq, c.gen)
+}
+
+func dumpOrdering(order *ordering) string {
+ var sb strings.Builder
+ for id, state := range order.gStates {
+ fmt.Fprintf(&sb, "G %d [status=%s seq=%s]\n", id, state.status, state.seq)
+ }
+ fmt.Fprintln(&sb)
+ for id, state := range order.pStates {
+ fmt.Fprintf(&sb, "P %d [status=%s seq=%s]\n", id, state.status, state.seq)
+ }
+ fmt.Fprintln(&sb)
+ for id, state := range order.mStates {
+ fmt.Fprintf(&sb, "M %d [g=%d p=%d]\n", id, state.g, state.p)
+ }
+ fmt.Fprintln(&sb)
+ fmt.Fprintf(&sb, "GC %d %s\n", order.gcSeq, order.gcState)
+ return sb.String()
+}
+
+// taskState represents an active task.
+type taskState struct {
+ // name is the type of the active task.
+ name string
+
+ // parentID is the parent ID of the active task.
+ parentID TaskID
+}
+
+// queue implements a growable ring buffer with a queue API.
+type queue[T any] struct {
+ start, end int
+ buf []T
+}
+
+// push adds a new event to the back of the queue.
+func (q *queue[T]) push(value T) {
+ if q.end-q.start == len(q.buf) {
+ q.grow()
+ }
+ q.buf[q.end%len(q.buf)] = value
+ q.end++
+}
+
+// grow increases the size of the queue.
+func (q *queue[T]) grow() {
+ if len(q.buf) == 0 {
+ q.buf = make([]T, 2)
+ return
+ }
+
+ // Create new buf and copy data over.
+ newBuf := make([]T, len(q.buf)*2)
+ pivot := q.start % len(q.buf)
+ first, last := q.buf[pivot:], q.buf[:pivot]
+ copy(newBuf[:len(first)], first)
+ copy(newBuf[len(first):], last)
+
+ // Update the queue state.
+ q.start = 0
+ q.end = len(q.buf)
+ q.buf = newBuf
+}
+
+// pop removes an event from the front of the queue. If the
+// queue is empty, it returns the zero value of T and false.
+func (q *queue[T]) pop() (T, bool) {
+ if q.end-q.start == 0 {
+ return *new(T), false
+ }
+ elem := &q.buf[q.start%len(q.buf)]
+ value := *elem
+ *elem = *new(T) // Clear the entry before returning, so we don't hold onto old tables.
+ q.start++
+ return value, true
+}
+
+// makeEvent creates an Event from the provided information.
+//
+// It's just a convenience function; it's always OK to construct
+// an Event manually if this isn't quite the right way to express
+// the contents of the event.
+func makeEvent(table *evTable, ctx schedCtx, typ event.Type, time Time, args ...uint64) Event {
+ ev := Event{
+ table: table,
+ ctx: ctx,
+ base: baseEvent{
+ typ: typ,
+ time: time,
+ },
+ }
+ copy(ev.base.args[:], args)
+ return ev
+}
diff --git a/vendor/golang.org/x/exp/trace/parser.go b/vendor/golang.org/x/exp/trace/parser.go
new file mode 100644
index 000000000000..b3bdd139e4f5
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/parser.go
@@ -0,0 +1,83 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+package trace
+
+// Frame is a frame in stack traces.
+type Frame struct {
+ PC uint64
+ Fn string
+ File string
+ Line int
+}
+
+const (
+ // Special P identifiers:
+ FakeP = 1000000 + iota
+ TimerP // depicts timer unblocks
+ NetpollP // depicts network unblocks
+ SyscallP // depicts returns from syscalls
+ GCP // depicts GC state
+ ProfileP // depicts recording of CPU profile samples
+)
+
+// Event types in the trace.
+// Verbatim copy from src/runtime/trace.go with the "trace" prefix removed.
+const ( + EvNone = 0 // unused + EvBatch = 1 // start of per-P batch of events [pid, timestamp] + EvFrequency = 2 // contains tracer timer frequency [frequency (ticks per second)] + EvStack = 3 // stack [stack id, number of PCs, array of {PC, func string ID, file string ID, line}] + EvGomaxprocs = 4 // current value of GOMAXPROCS [timestamp, GOMAXPROCS, stack id] + EvProcStart = 5 // start of P [timestamp, thread id] + EvProcStop = 6 // stop of P [timestamp] + EvGCStart = 7 // GC start [timestamp, seq, stack id] + EvGCDone = 8 // GC done [timestamp] + EvSTWStart = 9 // GC mark termination start [timestamp, kind] + EvSTWDone = 10 // GC mark termination done [timestamp] + EvGCSweepStart = 11 // GC sweep start [timestamp, stack id] + EvGCSweepDone = 12 // GC sweep done [timestamp, swept, reclaimed] + EvGoCreate = 13 // goroutine creation [timestamp, new goroutine id, new stack id, stack id] + EvGoStart = 14 // goroutine starts running [timestamp, goroutine id, seq] + EvGoEnd = 15 // goroutine ends [timestamp] + EvGoStop = 16 // goroutine stops (like in select{}) [timestamp, stack] + EvGoSched = 17 // goroutine calls Gosched [timestamp, stack] + EvGoPreempt = 18 // goroutine is preempted [timestamp, stack] + EvGoSleep = 19 // goroutine calls Sleep [timestamp, stack] + EvGoBlock = 20 // goroutine blocks [timestamp, stack] + EvGoUnblock = 21 // goroutine is unblocked [timestamp, goroutine id, seq, stack] + EvGoBlockSend = 22 // goroutine blocks on chan send [timestamp, stack] + EvGoBlockRecv = 23 // goroutine blocks on chan recv [timestamp, stack] + EvGoBlockSelect = 24 // goroutine blocks on select [timestamp, stack] + EvGoBlockSync = 25 // goroutine blocks on Mutex/RWMutex [timestamp, stack] + EvGoBlockCond = 26 // goroutine blocks on Cond [timestamp, stack] + EvGoBlockNet = 27 // goroutine blocks on network [timestamp, stack] + EvGoSysCall = 28 // syscall enter [timestamp, stack] + EvGoSysExit = 29 // syscall exit [timestamp, goroutine id, seq, real timestamp] + EvGoSysBlock = 30 // syscall blocks [timestamp] + EvGoWaiting = 31 // denotes that goroutine is blocked when tracing starts [timestamp, goroutine id] + EvGoInSyscall = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id] + EvHeapAlloc = 33 // gcController.heapLive change [timestamp, heap live bytes] + EvHeapGoal = 34 // gcController.heapGoal change [timestamp, heap goal bytes] + EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id] + EvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp] + EvString = 37 // string dictionary entry [ID, length, string] + EvGoStartLocal = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id] + EvGoUnblockLocal = 39 // goroutine is unblocked on the same P as the last event [timestamp, goroutine id, stack] + EvGoSysExitLocal = 40 // syscall exit on the same P as the last event [timestamp, goroutine id, real timestamp] + EvGoStartLabel = 41 // goroutine starts running with label [timestamp, goroutine id, seq, label string id] + EvGoBlockGC = 42 // goroutine blocks on GC assist [timestamp, stack] + EvGCMarkAssistStart = 43 // GC mark assist start [timestamp, stack] + EvGCMarkAssistDone = 44 // GC mark assist done [timestamp] + EvUserTaskCreate = 45 // trace.NewTask [timestamp, internal task id, internal parent id, name string, stack] + EvUserTaskEnd = 46 // end of task [timestamp, internal task id, stack] + EvUserRegion = 47 // trace.WithRegion [timestamp, internal task id, 
mode(0:start, 1:end), name string, stack] + EvUserLog = 48 // trace.Log [timestamp, internal id, key string id, stack, value string] + EvCPUSample = 49 // CPU profiling sample [timestamp, real timestamp, real P id (-1 when absent), goroutine id, stack] + EvCount = 50 +) diff --git a/vendor/golang.org/x/exp/trace/reader.go b/vendor/golang.org/x/exp/trace/reader.go new file mode 100644 index 000000000000..a5d2daf652a2 --- /dev/null +++ b/vendor/golang.org/x/exp/trace/reader.go @@ -0,0 +1,242 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by "gen.bash" from internal/trace; DO NOT EDIT. + +//go:build go1.21 + +package trace + +import ( + "bufio" + "fmt" + "io" + "slices" + "strings" + + "golang.org/x/exp/trace/internal/event/go122" + "golang.org/x/exp/trace/internal/oldtrace" + "golang.org/x/exp/trace/internal/version" +) + +// Reader reads a byte stream, validates it, and produces trace events. +type Reader struct { + r *bufio.Reader + lastTs Time + gen *generation + spill *spilledBatch + spillErr error // error from reading spill + frontier []*batchCursor + cpuSamples []cpuSample + order ordering + emittedSync bool + + go121Events *oldTraceConverter +} + +// NewReader creates a new trace reader. +func NewReader(r io.Reader) (*Reader, error) { + br := bufio.NewReader(r) + v, err := version.ReadHeader(br) + if err != nil { + return nil, err + } + switch v { + case version.Go111, version.Go119, version.Go121: + tr, err := oldtrace.Parse(br, v) + if err != nil { + return nil, err + } + return &Reader{ + go121Events: convertOldFormat(tr), + }, nil + case version.Go122, version.Go123: + return &Reader{ + r: br, + order: ordering{ + mStates: make(map[ThreadID]*mState), + pStates: make(map[ProcID]*pState), + gStates: make(map[GoID]*gState), + activeTasks: make(map[TaskID]taskState), + }, + // Don't emit a sync event when we first go to emit events. + emittedSync: true, + }, nil + default: + return nil, fmt.Errorf("unknown or unsupported version go 1.%d", v) + } +} + +// ReadEvent reads a single event from the stream. +// +// If the stream has been exhausted, it returns an invalid +// event and io.EOF. +func (r *Reader) ReadEvent() (e Event, err error) { + if r.go121Events != nil { + ev, err := r.go121Events.next() + if err != nil { + // XXX do we have to emit an EventSync when the trace is done? + return Event{}, err + } + return ev, nil + } + + // Go 1.22+ trace parsing algorithm. + // + // (1) Read in all the batches for the next generation from the stream. + // (a) Use the size field in the header to quickly find all batches. + // (2) Parse out the strings, stacks, CPU samples, and timestamp conversion data. + // (3) Group each event batch by M, sorted by timestamp. (batchCursor contains the groups.) + // (4) Organize batchCursors in a min-heap, ordered by the timestamp of the next event for each M. + // (5) Try to advance the next event for the M at the top of the min-heap. + // (a) On success, select that M. + // (b) On failure, sort the min-heap and try to advance other Ms. Select the first M that advances. + // (c) If there's nothing left to advance, goto (1). + // (6) Select the latest event for the selected M and get it ready to be returned. + // (7) Read the next event for the selected M and update the min-heap. + // (8) Return the selected event, goto (5) on the next call. 
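+ //
+ // Step (5) is implemented by tryAdvance below, with r.frontier acting as
+ // the min-heap of batchCursors. The deferred function that follows also
+ // clamps timestamps so the returned stream is strictly monotonic, even
+ // when raw event times collide.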
+ + // Set us up to track the last timestamp and fix up + // the timestamp of any event that comes through. + defer func() { + if err != nil { + return + } + if err = e.validateTableIDs(); err != nil { + return + } + if e.base.time <= r.lastTs { + e.base.time = r.lastTs + 1 + } + r.lastTs = e.base.time + }() + + // Consume any events in the ordering first. + if ev, ok := r.order.Next(); ok { + return ev, nil + } + + // Check if we need to refresh the generation. + if len(r.frontier) == 0 && len(r.cpuSamples) == 0 { + if !r.emittedSync { + r.emittedSync = true + return syncEvent(r.gen.evTable, r.lastTs), nil + } + if r.spillErr != nil { + return Event{}, r.spillErr + } + if r.gen != nil && r.spill == nil { + // If we have a generation from the last read, + // and there's nothing left in the frontier, and + // there's no spilled batch, indicating that there's + // no further generation, it means we're done. + // Return io.EOF. + return Event{}, io.EOF + } + // Read the next generation. + var err error + r.gen, r.spill, err = readGeneration(r.r, r.spill) + if r.gen == nil { + return Event{}, err + } + r.spillErr = err + + // Reset CPU samples cursor. + r.cpuSamples = r.gen.cpuSamples + + // Reset frontier. + for _, m := range r.gen.batchMs { + batches := r.gen.batches[m] + bc := &batchCursor{m: m} + ok, err := bc.nextEvent(batches, r.gen.freq) + if err != nil { + return Event{}, err + } + if !ok { + // Turns out there aren't actually any events in these batches. + continue + } + r.frontier = heapInsert(r.frontier, bc) + } + + // Reset emittedSync. + r.emittedSync = false + } + tryAdvance := func(i int) (bool, error) { + bc := r.frontier[i] + + if ok, err := r.order.Advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); !ok || err != nil { + return ok, err + } + + // Refresh the cursor's event. + ok, err := bc.nextEvent(r.gen.batches[bc.m], r.gen.freq) + if err != nil { + return false, err + } + if ok { + // If we successfully refreshed, update the heap. + heapUpdate(r.frontier, i) + } else { + // There's nothing else to read. Delete this cursor from the frontier. + r.frontier = heapRemove(r.frontier, i) + } + return true, nil + } + // Inject a CPU sample if it comes next. + if len(r.cpuSamples) != 0 { + if len(r.frontier) == 0 || r.cpuSamples[0].time < r.frontier[0].ev.time { + e := r.cpuSamples[0].asEvent(r.gen.evTable) + r.cpuSamples = r.cpuSamples[1:] + return e, nil + } + } + // Try to advance the head of the frontier, which should have the minimum timestamp. + // This should be by far the most common case + if len(r.frontier) == 0 { + return Event{}, fmt.Errorf("broken trace: frontier is empty:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order)) + } + if ok, err := tryAdvance(0); err != nil { + return Event{}, err + } else if !ok { + // Try to advance the rest of the frontier, in timestamp order. + // + // To do this, sort the min-heap. A sorted min-heap is still a + // min-heap, but now we can iterate over the rest and try to + // advance in order. This path should be rare. + slices.SortFunc(r.frontier, (*batchCursor).compare) + success := false + for i := 1; i < len(r.frontier); i++ { + if ok, err = tryAdvance(i); err != nil { + return Event{}, err + } else if ok { + success = true + break + } + } + if !success { + return Event{}, fmt.Errorf("broken trace: failed to advance: frontier:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order)) + } + } + + // Pick off the next event on the queue. At this point, one must exist. 
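+ // (This is the ordering's event queue, filled by order.Advance via
+ // queue.push and drained through order.Next.)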
+ ev, ok := r.order.Next()
+ if !ok {
+ panic("invariant violation: advance successful, but queue is empty")
+ }
+ return ev, nil
+}
+
+func dumpFrontier(frontier []*batchCursor) string {
+ var sb strings.Builder
+ for _, bc := range frontier {
+ spec := go122.Specs()[bc.ev.typ]
+ fmt.Fprintf(&sb, "M %d [%s time=%d", bc.m, spec.Name, bc.ev.time)
+ for i, arg := range spec.Args[1:] {
+ fmt.Fprintf(&sb, " %s=%d", arg, bc.ev.args[i])
+ }
+ fmt.Fprintf(&sb, "]\n")
+ }
+ return sb.String()
+}
diff --git a/vendor/golang.org/x/exp/trace/resources.go b/vendor/golang.org/x/exp/trace/resources.go
new file mode 100644
index 000000000000..f5ebc8ee70a3
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/resources.go
@@ -0,0 +1,278 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+package trace
+
+import "fmt"
+
+// ThreadID is the runtime-internal M structure's ID. This is unique
+// for each OS thread.
+type ThreadID int64
+
+// NoThread indicates that the relevant events don't correspond to any
+// thread in particular.
+const NoThread = ThreadID(-1)
+
+// ProcID is the runtime-internal P structure's id field. This is unique
+// for each P.
+type ProcID int64
+
+// NoProc indicates that the relevant events don't correspond to any
+// P in particular.
+const NoProc = ProcID(-1)
+
+// GoID is the runtime-internal G structure's goid field. This is unique
+// for each goroutine.
+type GoID int64
+
+// NoGoroutine indicates that the relevant events don't correspond to any
+// goroutine in particular.
+const NoGoroutine = GoID(-1)
+
+// GoState represents the state of a goroutine.
+//
+// New GoStates may be added in the future. Users of this type must be robust
+// to that possibility.
+type GoState uint8
+
+const (
+ GoUndetermined GoState = iota // No information is known about the goroutine.
+ GoNotExist // Goroutine does not exist.
+ GoRunnable // Goroutine is runnable but not running.
+ GoRunning // Goroutine is running.
+ GoWaiting // Goroutine is waiting on something to happen.
+ GoSyscall // Goroutine is in a system call.
+)
+
+// Executing returns true if the state indicates that the goroutine is executing
+// and bound to its thread.
+func (s GoState) Executing() bool {
+ return s == GoRunning || s == GoSyscall
+}
+
+// String returns a human-readable representation of a GoState.
+//
+// The format of the returned string is for debugging purposes and is subject to change.
+func (s GoState) String() string {
+ switch s {
+ case GoUndetermined:
+ return "Undetermined"
+ case GoNotExist:
+ return "NotExist"
+ case GoRunnable:
+ return "Runnable"
+ case GoRunning:
+ return "Running"
+ case GoWaiting:
+ return "Waiting"
+ case GoSyscall:
+ return "Syscall"
+ }
+ return "Bad"
+}
+
+// ProcState represents the state of a proc.
+//
+// New ProcStates may be added in the future. Users of this type must be robust
+// to that possibility.
+type ProcState uint8
+
+const (
+ ProcUndetermined ProcState = iota // No information is known about the proc.
+ ProcNotExist // Proc does not exist.
+ ProcRunning // Proc is running.
+ ProcIdle // Proc is idle.
+)
+
+// Executing returns true if the state indicates that the proc is executing
+// and bound to its thread.
+func (s ProcState) Executing() bool {
+ return s == ProcRunning
+}
+
+// String returns a human-readable representation of a ProcState.
+// +// The format of the returned string is for debugging purposes and is subject to change. +func (s ProcState) String() string { + switch s { + case ProcUndetermined: + return "Undetermined" + case ProcNotExist: + return "NotExist" + case ProcRunning: + return "Running" + case ProcIdle: + return "Idle" + } + return "Bad" +} + +// ResourceKind indicates a kind of resource that has a state machine. +// +// New ResourceKinds may be added in the future. Users of this type must be robust +// to that possibility. +type ResourceKind uint8 + +const ( + ResourceNone ResourceKind = iota // No resource. + ResourceGoroutine // Goroutine. + ResourceProc // Proc. + ResourceThread // Thread. +) + +// String returns a human-readable representation of a ResourceKind. +// +// The format of the returned string is for debugging purposes and is subject to change. +func (r ResourceKind) String() string { + switch r { + case ResourceNone: + return "None" + case ResourceGoroutine: + return "Goroutine" + case ResourceProc: + return "Proc" + case ResourceThread: + return "Thread" + } + return "Bad" +} + +// ResourceID represents a generic resource ID. +type ResourceID struct { + // Kind is the kind of resource this ID is for. + Kind ResourceKind + id int64 +} + +// MakeResourceID creates a general resource ID from a specific resource's ID. +func MakeResourceID[T interface{ GoID | ProcID | ThreadID }](id T) ResourceID { + var rd ResourceID + var a any = id + switch a.(type) { + case GoID: + rd.Kind = ResourceGoroutine + case ProcID: + rd.Kind = ResourceProc + case ThreadID: + rd.Kind = ResourceThread + } + rd.id = int64(id) + return rd +} + +// Goroutine obtains a GoID from the resource ID. +// +// r.Kind must be ResourceGoroutine or this function will panic. +func (r ResourceID) Goroutine() GoID { + if r.Kind != ResourceGoroutine { + panic(fmt.Sprintf("attempted to get GoID from %s resource ID", r.Kind)) + } + return GoID(r.id) +} + +// Proc obtains a ProcID from the resource ID. +// +// r.Kind must be ResourceProc or this function will panic. +func (r ResourceID) Proc() ProcID { + if r.Kind != ResourceProc { + panic(fmt.Sprintf("attempted to get ProcID from %s resource ID", r.Kind)) + } + return ProcID(r.id) +} + +// Thread obtains a ThreadID from the resource ID. +// +// r.Kind must be ResourceThread or this function will panic. +func (r ResourceID) Thread() ThreadID { + if r.Kind != ResourceThread { + panic(fmt.Sprintf("attempted to get ThreadID from %s resource ID", r.Kind)) + } + return ThreadID(r.id) +} + +// String returns a human-readable string representation of the ResourceID. +// +// This representation is subject to change and is intended primarily for debugging. +func (r ResourceID) String() string { + if r.Kind == ResourceNone { + return r.Kind.String() + } + return fmt.Sprintf("%s(%d)", r.Kind, r.id) +} + +// StateTransition provides details about a StateTransition event. +type StateTransition struct { + // Resource is the resource this state transition is for. + Resource ResourceID + + // Reason is a human-readable reason for the state transition. + Reason string + + // Stack is the stack trace of the resource making the state transition. + // + // This is distinct from the result (Event).Stack because it pertains to + // the transitioning resource, not any of the ones executing the event + // this StateTransition came from. + // + // An example of this difference is the NotExist -> Runnable transition for + // goroutines, which indicates goroutine creation. 
In this particular case,
+ // a Stack here would refer to the starting stack of the new goroutine, and
+ // an (Event).Stack would refer to the stack trace of whoever created the
+ // goroutine.
+ Stack Stack
+
+ // The actual transition data. Stored in a neutral form so that
+ // we don't need fields for every kind of resource.
+ id int64
+ oldState uint8
+ newState uint8
+}
+
+func goStateTransition(id GoID, from, to GoState) StateTransition {
+ return StateTransition{
+ Resource: ResourceID{Kind: ResourceGoroutine, id: int64(id)},
+ oldState: uint8(from),
+ newState: uint8(to),
+ }
+}
+
+func procStateTransition(id ProcID, from, to ProcState) StateTransition {
+ return StateTransition{
+ Resource: ResourceID{Kind: ResourceProc, id: int64(id)},
+ oldState: uint8(from),
+ newState: uint8(to),
+ }
+}
+
+// Goroutine returns the state transition for a goroutine.
+//
+// Transitions to and from states that are Executing are special in that
+// they change the future execution context. In other words, future events
+// on the same thread will feature the same goroutine until it stops running.
+//
+// Panics if d.Resource.Kind is not ResourceGoroutine.
+func (d StateTransition) Goroutine() (from, to GoState) {
+ if d.Resource.Kind != ResourceGoroutine {
+ panic("Goroutine called on non-Goroutine state transition")
+ }
+ return GoState(d.oldState), GoState(d.newState)
+}
+
+// Proc returns the state transition for a proc.
+//
+// Transitions to and from states that are Executing are special in that
+// they change the future execution context. In other words, future events
+// on the same thread will feature the same proc until it stops running.
+//
+// Panics if d.Resource.Kind is not ResourceProc.
+func (d StateTransition) Proc() (from, to ProcState) {
+ if d.Resource.Kind != ResourceProc {
+ panic("Proc called on non-Proc state transition")
+ }
+ return ProcState(d.oldState), ProcState(d.newState)
+}
diff --git a/vendor/golang.org/x/exp/trace/value.go b/vendor/golang.org/x/exp/trace/value.go
new file mode 100644
index 000000000000..577105d8e849
--- /dev/null
+++ b/vendor/golang.org/x/exp/trace/value.go
@@ -0,0 +1,57 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by "gen.bash" from internal/trace; DO NOT EDIT.
+
+//go:build go1.21
+
+package trace
+
+import "fmt"
+
+// Value is a dynamically-typed value obtained from a trace.
+type Value struct {
+ kind ValueKind
+ scalar uint64
+}
+
+// ValueKind is the type of a dynamically-typed value from a trace.
+type ValueKind uint8
+
+const (
+ ValueBad ValueKind = iota
+ ValueUint64
+)
+
+// Kind returns the ValueKind of the value.
+//
+// It represents the underlying structure of the value.
+//
+// New ValueKinds may be added in the future. Users of this type must be robust
+// to that possibility.
+func (v Value) Kind() ValueKind {
+ return v.kind
+}
+
+// Uint64 returns the uint64 value for a Value with Kind ValueUint64.
+//
+// Panics if this Value's Kind is not ValueUint64.
+func (v Value) Uint64() uint64 {
+ if v.kind != ValueUint64 {
+ panic("Uint64 called on Value of a different Kind")
+ }
+ return v.scalar
+}
+
+// valueAsString produces a debug string value.
+//
+// This isn't just Value.String because we may want to use that to store
+// string values in the future.
+func valueAsString(v Value) string { + switch v.Kind() { + case ValueUint64: + return fmt.Sprintf("Uint64(%d)", v.scalar) + } + return "Bad" +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE index 6a66aea5eafe..2a7cf70da6e4 100644 --- a/vendor/golang.org/x/mod/LICENSE +++ b/vendor/golang.org/x/mod/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE index 6a66aea5eafe..2a7cf70da6e4 100644 --- a/vendor/golang.org/x/sync/LICENSE +++ b/vendor/golang.org/x/sync/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/modules.txt b/vendor/modules.txt index 714589c729ef..6db55dee91a6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -922,8 +922,15 @@ golang.org/x/crypto/pkcs12/internal/rc2 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf -# golang.org/x/mod v0.17.0 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 +## explicit; go 1.22.0 +golang.org/x/exp/trace +golang.org/x/exp/trace/internal/event +golang.org/x/exp/trace/internal/event/go122 +golang.org/x/exp/trace/internal/oldtrace +golang.org/x/exp/trace/internal/version +# golang.org/x/mod v0.21.0 +## explicit; go 1.22.0 golang.org/x/mod/semver # golang.org/x/net v0.25.0 ## explicit; go 1.18 @@ -933,7 +940,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.7.0 +# golang.org/x/sync v0.8.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore
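
A note for reviewers: a captured flight trace can be sanity-checked by driving the vendored reader directly. The following is a minimal sketch, not part of the patch; the file name "trace" and the event-count output are illustrative assumptions only.

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"os"

	"golang.org/x/exp/trace"
)

func main() {
	// Open a trace previously downloaded from the flight recorder
	// (the illustrative file name "trace" is an assumption).
	f, err := os.Open("trace")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader sniffs the trace version from the header and selects the
	// matching parser: the old format for traces up to Go 1.21, or the
	// generation-based format for Go 1.22+.
	r, err := trace.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	// Drain the stream; ReadEvent returns io.EOF once it is exhausted.
	n := 0
	for {
		if _, err := r.ReadEvent(); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			log.Fatal(err)
		}
		n++
	}
	fmt.Printf("parsed %d events\n", n)
}

Any healthy capture should parse to completion with a positive event count; a parse error points at a truncated or corrupted download rather than at the recorder itself.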