diff --git a/go.mod b/go.mod index ffe050f3abafe..134cf9cf2d3b8 100644 --- a/go.mod +++ b/go.mod @@ -92,12 +92,12 @@ require ( go.universe.tf/metallb v0.11.0 golang.org/x/crypto v0.5.0 golang.org/x/exp v0.0.0-20221106115401-f9659909a136 - golang.org/x/net v0.5.0 + golang.org/x/net v0.6.0 golang.org/x/sync v0.1.0 golang.org/x/sys v0.5.0 golang.org/x/term v0.5.0 golang.org/x/time v0.3.0 - golang.org/x/tools v0.5.0 + golang.org/x/tools v0.6.0 golang.zx2c4.com/wireguard v0.0.0-20211017052713-f87e87af0d9a golang.zx2c4.com/wireguard/wgctrl v0.0.0-20211027115401-c9b1ec1aa6d8 google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef @@ -229,9 +229,9 @@ require ( go.mongodb.org/mongo-driver v1.10.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/mod v0.7.0 // indirect + golang.org/x/mod v0.8.0 // indirect golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/text v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 307d1fa3fc45e..cf713161f4883 100644 --- a/go.sum +++ b/go.sum @@ -1061,8 +1061,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1125,8 +1125,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1272,8 +1272,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1345,8 +1345,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= -golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 291c91908d78f..46a89eda6c199 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -184,7 +184,7 @@ func (p *parser) clearStackToContext(s scope) { } } -// parseGenericRawTextElements implements the generic raw text element parsing +// parseGenericRawTextElement implements the generic raw text element parsing // algorithm defined in 12.2.6.2. // https://html.spec.whatwg.org/multipage/parsing.html#parsing-elements-that-contain-only-text // TODO: Since both RAWTEXT and RCDATA states are treated as tokenizer's part diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 750ac52f2a526..b7dbd186957ef 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -18,7 +18,7 @@ type inflow struct { unsent int32 } -// set sets the initial window. +// init sets the initial window. func (f *inflow) init(n int32) { f.avail = n } diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index ebdfbee964ae3..b184a2771a12b 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -211,7 +211,7 @@ func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { return dt.ents[dt.len()-(int(i)-staticTable.len())], true } -// Decode decodes an entire block. +// DecodeFull decodes an entire block. // // TODO: remove this method and make it incremental later? This is // easier for debugging now. 
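A note on the hpack hunk above: the corrected doc comment now names the method it actually documents, DecodeFull. For context, a minimal usage sketch of that API (illustrative only, not part of the vendored diff; the header field is made up):

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode one header field into an HPACK block.
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		log.Fatal(err)
	}

	// Decode the entire block at once with DecodeFull, the method
	// whose comment this hunk fixes.
	dec := hpack.NewDecoder(4096, nil) // 4096 = max dynamic table size; nil emit func is fine for DecodeFull
	fields, err := dec.DecodeFull(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fields {
		fmt.Printf("%s = %s\n", f.Name, f.Value)
	}
}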
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b624dc0a705e1..9bd7035bfe865 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2192,7 +2192,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r tlsState = sc.tlsState } - needsContinue := rp.header.Get("Expect") == "100-continue" + needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") if needsContinue { rp.header.Del("Expect") } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b43ec10cfed95..05ba23d3d9886 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1569,7 +1569,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) { close(cs.donec) } -// awaitOpenSlotForStream waits until len(streams) < maxConcurrentStreams. +// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams. // Must hold cc.mu. func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { for { diff --git a/vendor/golang.org/x/net/icmp/multipart.go b/vendor/golang.org/x/net/icmp/multipart.go index 5f36675594ef5..c7b72bf3dd4bc 100644 --- a/vendor/golang.org/x/net/icmp/multipart.go +++ b/vendor/golang.org/x/net/icmp/multipart.go @@ -33,7 +33,7 @@ func multipartMessageBodyDataLen(proto int, withOrigDgram bool, b []byte, exts [ } // multipartMessageOrigDatagramLen takes b as an original datagram, -// and returns a required length for a padded orignal datagram in wire +// and returns a required length for a padded original datagram in wire // format. func multipartMessageOrigDatagramLen(proto int, b []byte) int { roundup := func(b []byte, align int) int { diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go index 1f422e71dc351..846f0e1f9cdf2 100644 --- a/vendor/golang.org/x/net/ipv6/dgramopt.go +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -245,7 +245,7 @@ func (c *dgramOpt) Checksum() (on bool, offset int, err error) { return true, offset, nil } -// SetChecksum enables the kernel checksum processing. If on is ture, +// SetChecksum enables the kernel checksum processing. If on is true, // the offset should be an offset in bytes into the data of where the // checksum field is located. func (c *dgramOpt) SetChecksum(on bool, offset int) error { diff --git a/vendor/golang.org/x/net/netutil/listen.go b/vendor/golang.org/x/net/netutil/listen.go index d5dfbab24fd4b..f8b779ea27eed 100644 --- a/vendor/golang.org/x/net/netutil/listen.go +++ b/vendor/golang.org/x/net/netutil/listen.go @@ -29,7 +29,7 @@ type limitListener struct { } // acquire acquires the limiting semaphore. Returns true if successfully -// accquired, false if the listener is closed and the semaphore is not +// acquired, false if the listener is closed and the semaphore is not // acquired. func (l *limitListener) acquire() bool { select { diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go index 9bf4286c794b8..d6c71101e4d1a 100644 --- a/vendor/golang.org/x/net/trace/histogram.go +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -32,7 +32,7 @@ type histogram struct { valueCount int64 // number of values recorded for single value } -// AddMeasurement records a value measurement observation to the histogram. +// addMeasurement records a value measurement observation to the histogram. 
func (h *histogram) addMeasurement(value int64) { // TODO: assert invariant h.sum += value diff --git a/vendor/golang.org/x/text/internal/language/compact/language.go b/vendor/golang.org/x/text/internal/language/compact/language.go index 83816a72a8a06..8c1b6666fb8a6 100644 --- a/vendor/golang.org/x/text/internal/language/compact/language.go +++ b/vendor/golang.org/x/text/internal/language/compact/language.go @@ -118,7 +118,7 @@ func (t Tag) Parent() Tag { return Tag{language: lang, locale: lang} } -// returns token t and the rest of the string. +// nextToken returns token t and the rest of the string. func nextToken(s string) (t, tail string) { p := strings.Index(s[1:], "-") if p == -1 { diff --git a/vendor/golang.org/x/text/internal/language/language.go b/vendor/golang.org/x/text/internal/language/language.go index 6105bc7fadc11..09d41c73670d4 100644 --- a/vendor/golang.org/x/text/internal/language/language.go +++ b/vendor/golang.org/x/text/internal/language/language.go @@ -409,7 +409,7 @@ func (t Tag) SetTypeForKey(key, value string) (Tag, error) { return t, nil } -// findKeyAndType returns the start and end position for the type corresponding +// findTypeForKey returns the start and end position for the type corresponding // to key or the point at which to insert the key-value pair if the type // wasn't found. The hasExt return value reports whether an -u extension was present. // Note: the extensions are typically very small and are likely to contain diff --git a/vendor/golang.org/x/text/language/language.go b/vendor/golang.org/x/text/language/language.go index 289b3a36d524b..4d9c6612129bf 100644 --- a/vendor/golang.org/x/text/language/language.go +++ b/vendor/golang.org/x/text/language/language.go @@ -344,7 +344,7 @@ func (t Tag) Parent() Tag { return Tag(compact.Tag(t).Parent()) } -// returns token t and the rest of the string. +// nextToken returns token t and the rest of the string. func nextToken(s string) (t, tail string) { p := strings.Index(s[1:], "-") if p == -1 { diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go index 2ea630608c486..e127a42b97a74 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -206,7 +206,7 @@ func (versionFlag) Get() interface{} { return nil } func (versionFlag) String() string { return "" } func (versionFlag) Set(s string) error { if s != "full" { - log.Fatalf("unsupported flag value: -V=%s", s) + log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s) } // This replicates the minimal subset of @@ -218,7 +218,10 @@ func (versionFlag) Set(s string) error { // Formats: // $progname version devel ... buildID=... 
// $progname version go1.9.1 - progname := os.Args[0] + progname, err := os.Executable() + if err != nil { + return err + } f, err := os.Open(progname) if err != nil { log.Fatal(err) diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go index cf76bdebe711b..8f49e8fc76b3d 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -15,7 +15,6 @@ import ( "flag" "fmt" "go/format" - "go/parser" "go/token" "go/types" "io/ioutil" @@ -33,6 +32,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/robustio" ) var ( @@ -308,6 +309,9 @@ func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action } func applyFixes(roots []*action) error { + // visit all of the actions and accumulate the suggested edits. + paths := make(map[robustio.FileID]string) + editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit) visited := make(map[*action]bool) var apply func(*action) error var visitAll func(actions []*action) error @@ -326,76 +330,43 @@ func applyFixes(roots []*action) error { return nil } - // TODO(matloob): Is this tree business too complicated? (After all this is Go!) - // Just create a set (map) of edits, sort by pos and call it a day? - type offsetedit struct { - start, end int - newText []byte - } // TextEdit using byteOffsets instead of pos - type node struct { - edit offsetedit - left, right *node - } - // Edits x and y are equivalent. - equiv := func(x, y offsetedit) bool { - return x.start == y.start && x.end == y.end && bytes.Equal(x.newText, y.newText) - } - - var insert func(tree **node, edit offsetedit) error - insert = func(treeptr **node, edit offsetedit) error { - if *treeptr == nil { - *treeptr = &node{edit, nil, nil} - return nil - } - tree := *treeptr - if edit.end <= tree.edit.start { - return insert(&tree.left, edit) - } else if edit.start >= tree.edit.end { - return insert(&tree.right, edit) - } - if equiv(edit, tree.edit) { // equivalent edits? - // We skip over equivalent edits without considering them - // an error. This handles identical edits coming from the - // multiple ways of loading a package into a - // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". - return nil - } - - // Overlapping text edit. - return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)", - edit.start, edit.end, tree.edit.start, tree.edit.end) - - } - - editsForFile := make(map[*token.File]*node) - apply = func(act *action) error { + editsForTokenFile := make(map[*token.File][]diff.Edit) for _, diag := range act.diagnostics { for _, sf := range diag.SuggestedFixes { for _, edit := range sf.TextEdits { // Validate the edit. + // Any error here indicates a bug in the analyzer. 
+ file := act.pkg.Fset.File(edit.Pos) + if file == nil { + return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)", + act.a.Name, edit.Pos) + } if edit.Pos > edit.End { - return fmt.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)", + return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)", act.a.Name, edit.Pos, edit.End) } - file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End) - if file == nil || endfile == nil || file != endfile { - return (fmt.Errorf( - "diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v", - act.a.Name, file.Name(), endfile.Name())) + if eof := token.Pos(file.Base() + file.Size()); edit.End > eof { + return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)", + act.a.Name, edit.End, eof) } - start, end := file.Offset(edit.Pos), file.Offset(edit.End) - - // TODO(matloob): Validate that edits do not affect other packages. - root := editsForFile[file] - if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil { - return err - } - editsForFile[file] = root // In case the root changed + edit := diff.Edit{Start: file.Offset(edit.Pos), End: file.Offset(edit.End), New: string(edit.NewText)} + editsForTokenFile[file] = append(editsForTokenFile[file], edit) } } } + + for f, edits := range editsForTokenFile { + id, _, err := robustio.GetFileID(f.Name()) + if err != nil { + return err + } + if _, hasId := paths[id]; !hasId { + paths[id] = f.Name() + editsByAction[id] = make(map[*action][]diff.Edit) + } + editsByAction[id][act] = edits + } return nil } @@ -403,59 +374,126 @@ func applyFixes(roots []*action) error { return err } - fset := token.NewFileSet() // Shared by parse calls below - // Now we've got a set of valid edits for each file. Get the new file contents. - for f, tree := range editsForFile { - contents, err := ioutil.ReadFile(f.Name()) - if err != nil { - return err + // Validate and group the edits to each actual file. + editsByPath := make(map[string][]diff.Edit) + for id, actToEdits := range editsByAction { + path := paths[id] + actions := make([]*action, 0, len(actToEdits)) + for act := range actToEdits { + actions = append(actions, act) } - cur := 0 // current position in the file - - var out bytes.Buffer - - var recurse func(*node) - recurse = func(node *node) { - if node.left != nil { - recurse(node.left) + // Does any action create conflicting edits? + for _, act := range actions { + edits := actToEdits[act] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := act.a.Name, edits[invalid-1], edits[invalid] + return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) } + } - edit := node.edit - if edit.start > cur { - out.Write(contents[cur:edit.start]) - out.Write(edit.newText) - } else if cur == 0 && edit.start == 0 { // edit starts at first character? - out.Write(edit.newText) + // Does any pair of different actions create edits that conflict? + for j := range actions { + for k := range actions[:j] { + x, y := actions[j], actions[k] + if x.a.Name > y.a.Name { + x, y = y, x + } + xedits, yedits := actToEdits[x], actToEdits[y] + combined := append(xedits, yedits...) 
+ if _, invalid := validateEdits(combined); invalid > 0 { + // TODO: consider applying each action's consistent list of edits entirely, + // and then using a three-way merge (such as GNU diff3) on the resulting + // files to report more precisely the parts that actually conflict. + return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits) + } } - cur = edit.end + } - if node.right != nil { - recurse(node.right) - } + var edits []diff.Edit + for act := range actToEdits { + edits = append(edits, actToEdits[act]...) } - recurse(tree) - // Write out the rest of the file. - if cur < len(contents) { - out.Write(contents[cur:]) + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. + } + + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + applied, err := diff.Apply(string(contents), edits) + if err != nil { + return err } + out := []byte(applied) // Try to format the file. - ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments) - if err == nil { - var buf bytes.Buffer - if err = format.Node(&buf, fset, ff); err == nil { - out = buf - } + if formatted, err := format.Source(out); err == nil { + out = formatted } - if err := ioutil.WriteFile(f.Name(), out.Bytes(), 0644); err != nil { + if err := ioutil.WriteFile(path, out, 0644); err != nil { return err } } return nil } +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } + diff.SortEdits(edits) + unique := []diff.Edit{edits[0]} + invalid := -1 + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i + } + } + } + return unique, invalid +} + +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. +func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := ioutil.ReadFile(path) + if err != nil { + return err + } + oldlabel, old := "base", string(contents) + + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits) + if err != nil { + return err + } + + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) +} + // printDiagnostics prints the diagnostics for the root packages in either // plain text or JSON format. JSON format also includes errors for any // dependencies. @@ -929,7 +967,7 @@ func factType(fact analysis.Fact) reflect.Type { return t } -// allObjectFacts implements Pass.AllObjectFacts. +// allPackageFacts implements Pass.AllPackageFacts. 
func (act *action) allPackageFacts() []analysis.PackageFact { facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) for k := range act.packageFacts { diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index af5e17feeeab6..3fbfebf369378 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -53,10 +53,13 @@ func New(files []*ast.File) *Inspector { // of an ast.Node during a traversal. type event struct { node ast.Node - typ uint64 // typeOf(node) - index int // 1 + index of corresponding pop event, or 0 if this is a pop + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event } +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. + // Preorder visits all the nodes of the files supplied to New in // depth-first order. It calls f(n) for each node n before it visits // n's children. @@ -72,10 +75,17 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { + if ev.index > i { + // push + if ev.typ&mask != 0 { f(ev.node) } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } } i++ } @@ -94,15 +104,24 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proc mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] - if ev.typ&mask != 0 { - if ev.index > 0 { - // push + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { if !f(ev.node, true) { - i = ev.index // jump to corresponding pop + 1 + i = pop + 1 // jump to corresponding pop + 1 continue } - } else { - // pop + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { f(ev.node, false) } } @@ -119,19 +138,26 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s var stack []ast.Node for i := 0; i < len(in.events); { ev := in.events[i] - if ev.index > 0 { + if ev.index > i { // push + pop := ev.index stack = append(stack, ev.node) if ev.typ&mask != 0 { if !f(ev.node, true, stack) { - i = ev.index + i = pop + 1 stack = stack[:len(stack)-1] continue } } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them.
+ i = pop + continue + } } else { // pop - if ev.typ&mask != 0 { + push := ev.index + if in.events[push].typ&mask != 0 { f(ev.node, false, stack) } stack = stack[:len(stack)-1] @@ -157,25 +183,31 @@ func traverse(files []*ast.File) []event { events := make([]event, 0, capacity) var stack []event + stack = append(stack, event{}) // include an extra event so file nodes have a parent for _, f := range files { ast.Inspect(f, func(n ast.Node) bool { if n != nil { // push ev := event{ node: n, - typ: typeOf(n), + typ: 0, // temporarily used to accumulate type bits of subtree index: len(events), // push event temporarily holds own index } stack = append(stack, ev) events = append(events, ev) } else { // pop - ev := stack[len(stack)-1] - stack = stack[:len(stack)-1] + top := len(stack) - 1 + ev := stack[top] + typ := typeOf(ev.node) + push := ev.index + parent := top - 1 - events[ev.index].index = len(events) + 1 // make push refer to pop + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = len(events) // make push refer to pop - ev.index = 0 // turn ev into a pop event + stack = stack[:top] events = append(events, ev) } return true diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 1d5f0e45b2bbf..0f1505b808a69 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -878,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // never has to create a types.Package for an indirect dependency, // which would then require that such created packages be explicitly // inserted back into the Import graph as a final step after export data loading. + // (Hence this return is after the Types assignment.) // The Diamond test exercises this case. if !lpkg.needtypes && !lpkg.needsrc { return } if !lpkg.needsrc { - ld.loadFromExportData(lpkg) + if err := ld.loadFromExportData(lpkg); err != nil { + lpkg.Errors = append(lpkg.Errors, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, // e.g. can't find/open/parse export data + }) + } return // not a source package, don't get syntax trees } @@ -970,7 +977,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { // The config requested loading sources and types, but sources are missing. // Add an error to the package and fall back to loading from export data. appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) - ld.loadFromExportData(lpkg) + _ = ld.loadFromExportData(lpkg) // ignore any secondary errors + return // can't get syntax trees for this package } @@ -1194,9 +1202,10 @@ func sameFile(x, y string) bool { return false } -// loadFromExportData returns type information for the specified +// loadFromExportData ensures that type information is present for the specified // package, loading it from an export data file on the first request. -func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { +// On success it sets lpkg.Types to a new Package. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) error { if lpkg.PkgPath == "" { log.Fatalf("internal error: Package %s has no PkgPath", lpkg) } @@ -1207,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // must be sequential. (Finer-grained locking would require // changes to the gcexportdata API.) 
// - // The exportMu lock guards the Package.Pkg field and the - // types.Package it points to, for each Package in the graph. + // The exportMu lock guards the lpkg.Types field and the + // types.Package it points to, for each loaderPackage in the graph. // // Not all accesses to Package.Pkg need to be protected by exportMu: // graph ordering ensures that direct dependencies of source @@ -1217,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error defer ld.exportMu.Unlock() if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { - return tpkg, nil // cache hit + return nil // cache hit } lpkg.IllTyped = true // fail safe if lpkg.ExportFile == "" { // Errors while building export data will have been printed to stderr. - return nil, fmt.Errorf("no export data file") + return fmt.Errorf("no export data file") } f, err := os.Open(lpkg.ExportFile) if err != nil { - return nil, err + return err } defer f.Close() @@ -1240,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // queries.) r, err := gcexportdata.NewReader(f) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } // Build the view. @@ -1284,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error // (May modify incomplete packages in view but not create new ones.) tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) if err != nil { - return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) } if _, ok := view["go.shape"]; ok { // Account for the pseudopackage "go.shape" that gets @@ -1297,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error lpkg.Types = tpkg lpkg.IllTyped = false - - return tpkg, nil + return nil } // impliedLoadMode returns loadMode with its dependencies. diff --git a/vendor/golang.org/x/tools/internal/diff/diff.go b/vendor/golang.org/x/tools/internal/diff/diff.go new file mode 100644 index 0000000000000..3b315054c9fac --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/diff.go @@ -0,0 +1,162 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package diff computes differences between text files or strings. +package diff + +import ( + "fmt" + "sort" + "strings" +) + +// An Edit describes the replacement of a portion of a text file. +type Edit struct { + Start, End int // byte offsets of the region to replace + New string // the replacement +} + +func (e Edit) String() string { + return fmt.Sprintf("{Start:%d,End:%d,New:%s}", e.Start, e.End, e.New) +} + +// Apply applies a sequence of edits to the src buffer and returns the +// result. Edits are applied in order of start offset; edits with the +// same start offset are applied in the order they were provided. +// +// Apply returns an error if any edit is out of bounds, +// or if any pair of edits is overlapping. +func Apply(src string, edits []Edit) (string, error) { + edits, size, err := validate(src, edits) + if err != nil { + return "", err + } + + // Apply edits. + out := make([]byte, 0, size) + lastEnd := 0 + for _, edit := range edits { + if lastEnd < edit.Start { + out = append(out, src[lastEnd:edit.Start]...) + } + out = append(out, edit.New...) + lastEnd = edit.End + } + out = append(out, src[lastEnd:]...)
+ + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. + size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See Apply for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all edits begin and end at the start of a line? + // TODO(adonovan): opt: is this fast path necessary? + // (Also, it complicates the result ownership.) + for _, edit := range edits { + if edit.Start >= len(src) || // insertion at EOF + edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start + edit.End > 0 && src[edit.End-1] != '\n' { // not at line start + goto expand + } + } + return edits, nil // aligned + +expand: + expanded := make([]Edit, 0, len(edits)) // a guess + prev := edits[0] + // TODO(adonovan): opt: start from the first misaligned edit. + // TODO(adonovan): opt: avoid quadratic cost of string += string. + for _, edit := range edits[1:] { + between := src[prev.End:edit.Start] + if !strings.Contains(between, "\n") { + // overlapping lines: combine with previous edit. + prev.New += between + edit.New + prev.End = edit.End + } else { + // non-overlapping lines: flush previous edit. + expanded = append(expanded, expandEdit(prev, src)) + prev = edit + } + } + return append(expanded, expandEdit(prev, src)), nil // flush final edit +} + +// expandEdit returns edit expanded to complete whole lines. +func expandEdit(edit Edit, src string) Edit { + // Expand start left to start of line. + // (delta is the zero-based column number of start.) + start := edit.Start + if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 { + edit.Start -= delta + edit.New = src[start-delta:start] + edit.New + } + + // Expand end right to end of line.
end := edit.End + if nl := strings.IndexByte(src[end:], '\n'); nl < 0 { + edit.End = len(src) // extend to EOF + } else { + edit.End = end + nl + 1 // extend beyond \n + } + edit.New += src[end:edit.End] + + return edit +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/common.go b/vendor/golang.org/x/tools/internal/diff/lcs/common.go new file mode 100644 index 0000000000000..c3e82dd26839a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/common.go @@ -0,0 +1,179 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "log" + "sort" +) + +// lcs is a longest common sequence +type lcs []diag + +// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len. +// All computed diagonals are parts of a longest common subsequence. +type diag struct { + X, Y int + Len int +} + +// sort sorts in place, by lowest X, and if tied, inversely by Len +func (l lcs) sort() lcs { + sort.Slice(l, func(i, j int) bool { + if l[i].X != l[j].X { + return l[i].X < l[j].X + } + return l[i].Len > l[j].Len + }) + return l +} + +// validate that the elements of the lcs do not overlap +// (can only happen when the two-sided algorithm ends early) +// expects the lcs to be sorted +func (l lcs) valid() bool { + for i := 1; i < len(l); i++ { + if l[i-1].X+l[i-1].Len > l[i].X { + return false + } + if l[i-1].Y+l[i-1].Len > l[i].Y { + return false + } + } + return true +} + +// repair overlapping lcs +// only called if two-sided stops early +func (l lcs) fix() lcs { + // from the set of diagonals in l, find a maximal non-conflicting set + // this problem may be NP-complete, but we use a greedy heuristic, + // which is quadratic, but with a better data structure, could be D log D. + // independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs + // which has to have monotone x and y + if len(l) == 0 { + return nil + } + sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len }) + tmp := make(lcs, 0, len(l)) + tmp = append(tmp, l[0]) + for i := 1; i < len(l); i++ { + var dir direction + nxt := l[i] + for _, in := range tmp { + if dir, nxt = overlap(in, nxt); dir == empty || dir == bad { + break + } + } + if nxt.Len > 0 && dir != bad { + tmp = append(tmp, nxt) + } + } + tmp.sort() + if false && !tmp.valid() { // debug checking + log.Fatalf("here %d", len(tmp)) + } + return tmp +} + +type direction int + +const ( + empty direction = iota // diag is empty (so not in lcs) + leftdown // proposed acceptably to the left and below + rightup // proposed diag is acceptably to the right and above + bad // proposed diag is inconsistent with the lcs so far +) + +// overlap trims the proposed diag prop so it doesn't overlap with +// the existing diag that has already been added to the lcs.
+func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of prop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prepend is only called to extend diagonals +// in the backward direction. +func (lcs lcs) prepend(x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// append appends a diagonal, or extends the existing one, +// by adding the edge (x,y)-(x+1,y+1). append is only called +// to extend diagonals in the forward direction. +func (lcs lcs) append(x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. + if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/doc.go b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go new file mode 100644 index 0000000000000..dc779f38a017a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M.
There is a +horizontal edge from v[i][j] to v[i+1][j] whenever both are in +the graph, and a vertical edge from v[i][j] to v[i][j+1] similarly. +When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1]. + +A path in the graph between (0,0) and (N,M) determines a sequence +of edits converting A into B: each horizontal edge corresponds to removing +an element of A, and each vertical edge corresponds to inserting an +element of B. + +A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph +is of length D if it has D non-diagonal edges. The algorithms generate +forward paths (in which at least one of x,y increases at each edge), +or backward paths (in which at least one of x,y decreases at each edge), +or a combination. (Note that the orientation is the traditional mathematical one, +with the origin in the lower-left corner.) + +Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.) + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + + +The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at +the end of a maximal path of length D. (Because x-y=k it suffices to remember +only the x coordinate of the vertex.) + +The forward algorithm: Find the longest diagonal starting at (0,0) and +label its end with D=0,k=0. From that vertex take a vertical step and +then follow the longest diagonal (up and to the right), and label that vertex +with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow +the longest diagonal (up and to the right) and label that vertex +D=1,k=1. In the same way, having labelled all the D vertices, +from a vertex labelled D,k find two vertices +tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same +diagonal, in which case take the one with the larger x. + +Eventually the path gets to (N,M), and the diagonals on it are the LCS. + +Here is the edit graph with the ends of D-paths labelled. (So, for instance, +0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first +step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. 
+A:"aabbaa", B:"aacaba" + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙ + b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | | + ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙ + c | | | | | | | + ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ | + ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ + a a b b a a + +The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion +is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same +diagonal. (Here the edgegraph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward +2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path. +Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the +computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed +from the edgegraph. The implementation looks for a number of special cases to try to avoid computing an extra forward path. + +If the two-sided algorithm has stop early (because D has become too large) it will have found a forward LCS and a +backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two +computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS +is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two +to form a best-effort LCS. In the worst case the forward partial LCS may have to +be recomputed. +*/ + +/* Eugene Myers paper is titled +"An O(ND) Difference Algorithm and Its Variations" +and can be found at +http://www.xmailserver.org/diff2.pdf + +(There is a generic implementation of the algorithm the the repository with git hash +b9ad7e4ade3a686d608e44475390ad428e60e7fc) +*/ diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/git.sh b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh new file mode 100644 index 0000000000000..6856f84395851 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# +# Copyright 2022 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. +# +# Creates a zip file containing all numbered versions +# of the commit history of a large source file, for use +# as input data for the tests of the diff algorithm. +# +# Run script from root of the x/tools repo. + +set -eu + +# WARNING: This script will install the latest version of $file +# The largest real source file in the x/tools repo. 
+# file=internal/lsp/source/completion/completion.go +# file=internal/lsp/source/diagnostics.go +file=internal/lsp/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/labels.go b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go new file mode 100644 index 0000000000000..0689f1ed700ab --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go new file mode 100644 index 0000000000000..7af11fc896cee --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 30 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. +// The algorithm is one of {forward, backward, twosided}. 
+func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += 
+	}
+	return x
+}
+
+func (e *editGraph) setForward(d, k, relx int) {
+	x := e.lookForward(k, relx)
+	e.vf.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getForward(d, k int) int {
+	x := e.vf.get(d, k)
+	return x
+}
+
+// --- BACKWARD ---
+
+// bdone decides if the backward path has reached the lower left corner.
+func (e *editGraph) bdone(D, k int) (bool, lcs) {
+	// x, y, k are relative to the rectangle
+	x := e.vb.get(D, k)
+	y := x - (k + e.delta)
+	if x == 0 && y == 0 {
+		return true, e.backwardlcs(D, k)
+	}
+	return false, nil
+}
+
+// run the backward algorithm, until success or up to the limit on D.
+func backward(e *editGraph) lcs {
+	e.setBackward(0, 0, e.ux)
+	if ok, ans := e.bdone(0, 0); ok {
+		return ans
+	}
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		if ok, ans := e.bdone(D+1, -(D + 1)); ok {
+			return ans
+		}
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		if ok, ans := e.bdone(D+1, D+1); ok {
+			return ans
+		}
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+			if ok, ans := e.bdone(D+1, k); ok {
+				return ans
+			}
+		}
+	}
+
+	// D is too large
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the part of the lcs found
+	kmax := -e.limit - 1
+	diagmin := 1 << 25
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no paths when limit=%d?", e.limit))
+	}
+	return e.backwardlcs(e.limit, kmax)
+}
+
+// recover the lcs by backtracking
+func (e *editGraph) backwardlcs(D, k int) lcs {
+	var ans lcs
+	for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; {
+		if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) {
+			// D--, k--, x unchanged
+			D, k = D-1, k-1
+			continue
+		} else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) {
+			// D--, k++, x++
+			D, k, x = D-1, k+1, x+1
+			continue
+		}
+		y := x - (k + e.delta)
+		ans = ans.append(x+e.lx, y+e.ly)
+		x++
+	}
+	return ans
+}
+
+// start at (x,y) and go down the diagonal as far as possible.
+func (e *editGraph) lookBackward(k, relx int) int {
+	rely := relx - (k + e.delta) // forward k = k + e.delta
+	x, y := relx+e.lx, rely+e.ly
+	if x > 0 && y > 0 {
+		x -= e.seqs.commonSuffixLen(0, x, 0, y)
+	}
+	return x
+}
+
+// convert to rectangle coordinates, and label the result with d.
+func (e *editGraph) setBackward(d, k, relx int) {
+	x := e.lookBackward(k, relx)
+	e.vb.set(d, k, x-e.lx)
+}
+
+func (e *editGraph) getBackward(d, k int) int {
+	x := e.vb.get(d, k)
+	return x
+}
+
+// --- TWOSIDED ---
+
+func twosided(e *editGraph) lcs {
+	// The termination condition could be improved, as either the forward
+	// or backward pass could succeed before Myers' Lemma applies.
+	// Aside from questions of efficiency (is the extra testing cost-effective?),
+	// this is more likely to matter when e.limit is reached.
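+	//
+	// (Informally: after each half-pass below, twoDone asks whether some
+	// forward diagonal has met or passed the corresponding backward
+	// diagonal; once that happens, Myers' Lemma lets the two partial
+	// paths be stitched together.)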
+	e.setForward(0, 0, e.lx)
+	e.setBackward(0, 0, e.ux)
+
+	// from D to D+1
+	for D := 0; D < e.limit; D++ {
+		// just finished a backwards pass, so check
+		if got, ok := e.twoDone(D, D); ok {
+			return e.twolcs(D, D, got)
+		}
+		// do a forwards pass (D to D+1)
+		e.setForward(D+1, -(D + 1), e.getForward(D, -D))
+		e.setForward(D+1, D+1, e.getForward(D, D)+1)
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get backwards
+			lookv := e.lookForward(k, e.getForward(D, k-1)+1)
+			lookh := e.lookForward(k, e.getForward(D, k+1))
+			if lookv > lookh {
+				e.setForward(D+1, k, lookv)
+			} else {
+				e.setForward(D+1, k, lookh)
+			}
+		}
+		// just did a forward pass, so check
+		if got, ok := e.twoDone(D+1, D); ok {
+			return e.twolcs(D+1, D, got)
+		}
+		// do a backward pass, D to D+1
+		e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1)
+		e.setBackward(D+1, D+1, e.getBackward(D, D))
+		for k := -D + 1; k <= D-1; k += 2 {
+			// these are tricky and easy to get wrong
+			lookv := e.lookBackward(k, e.getBackward(D, k-1))
+			lookh := e.lookBackward(k, e.getBackward(D, k+1)-1)
+			if lookv < lookh {
+				e.setBackward(D+1, k, lookv)
+			} else {
+				e.setBackward(D+1, k, lookh)
+			}
+		}
+	}
+
+	// D is too large: combine a forward and a backward partial lcs.
+	// First, a forward one:
+	kmax := -e.limit - 1
+	diagmax := -1
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getForward(e.limit, k)
+		y := x - k
+		if x+y > diagmax && x <= e.ux && y <= e.uy {
+			diagmax, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit))
+	}
+	lcs := e.forwardlcs(e.limit, kmax)
+	// now a backward one
+	// find the D path with minimal x+y inside the rectangle and
+	// use that to compute the lcs
+	diagmin := 1 << 25 // infinity
+	for k := -e.limit; k <= e.limit; k += 2 {
+		x := e.getBackward(e.limit, k)
+		y := x - (k + e.delta)
+		if x+y < diagmin && x >= 0 && y >= 0 {
+			diagmin, kmax = x+y, k
+		}
+	}
+	if kmax < -e.limit {
+		panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit))
+	}
+	lcs = append(lcs, e.backwardlcs(e.limit, kmax)...)
+	// These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs)
+	ans := lcs.fix()
+	return ans
+}
+
+// Does Myers' Lemma apply?
+func (e *editGraph) twoDone(df, db int) (int, bool) {
+	if (df+db+e.delta)%2 != 0 {
+		return 0, false // diagonals cannot overlap
+	}
+	kmin := -db + e.delta
+	if -df > kmin {
+		kmin = -df
+	}
+	kmax := db + e.delta
+	if df < kmax {
+		kmax = df
+	}
+	for k := kmin; k <= kmax; k += 2 {
+		x := e.vf.get(df, k)
+		u := e.vb.get(db, k-e.delta)
+		if u <= x {
+			// is it worth looking at all the other k?
+			for l := k; l <= kmax; l += 2 {
+				x := e.vf.get(df, l)
+				y := x - l
+				u := e.vb.get(db, l-e.delta)
+				v := u - l
+				if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux {
+					return l, true
+				}
+			}
+			return k, true
+		}
+	}
+	return 0, false
+}
+
+func (e *editGraph) twolcs(df, db, kf int) lcs {
+	// db==df || db+1==df
+	x := e.vf.get(df, kf)
+	y := x - kf
+	kb := kf - e.delta
+	u := e.vb.get(db, kb)
+	v := u - kf
+
+	// Myers proved there is a df-path from (0,0) to (u,v)
+	// and a db-path from (x,y) to (N,M).
+	// In the first case the overall path is the forward path
+	// to (u,v) followed by the backward path from (u,v) to (N,M).
+	// In the second case it is the forward path from (0,0) to (x,y)
+	// followed by the backward path from (x,y) to (N,M).
+
+	// Look for some special cases to avoid computing either of these paths.
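+	// (The quoted string pairs in the branches below, e.g. "babaab"
+	// "cccaba", appear to be sample inputs that exercise each branch.)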
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go new file mode 100644 index 0000000000000..2d72d2630435b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
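+//
+// As an illustration (a sketch, not code used by this package): the
+// three-index form s[i:j:j] pins the result's capacity to its length,
+// so a later append cannot write past j into the shared backing array:
+//
+//	s := []byte("abcdef")
+//	t := s[1:3:3]       // len(t) == 2, cap(t) == 2
+//	t = append(t, 'x')  // must reallocate; s is unchanged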
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/vendor/golang.org/x/tools/internal/diff/ndiff.go b/vendor/golang.org/x/tools/internal/diff/ndiff.go new file mode 100644 index 0000000000000..050b08ded4604 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/ndiff.go @@ -0,0 +1,109 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "golang.org/x/tools/internal/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if stringIsASCII(before) && stringIsASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. +func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if bytesIsASCII(before) && bytesIsASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. 
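+	// Each lcs.Diff means: replace before[Start:End] with
+	// after[ReplStart:ReplEnd]. For ASCII input these are already
+	// byte offsets, so they map onto Edit fields directly.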
+	res := make([]Edit, len(diffs))
+	for i, d := range diffs {
+		res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])}
+	}
+	return res
+}
+
+func diffRunes(before, after []rune) []Edit {
+	diffs := lcs.DiffRunes(before, after)
+
+	// The diffs returned by the lcs package use indexes
+	// into whatever slice was passed in.
+	// Convert rune offsets to byte offsets.
+	res := make([]Edit, len(diffs))
+	lastEnd := 0
+	utf8Len := 0
+	for i, d := range diffs {
+		utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits
+		start := utf8Len
+		utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit
+		res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])}
+		lastEnd = d.End
+	}
+	return res
+}
+
+// runes is like []rune(string(bytes)) without the duplicate allocation.
+func runes(bytes []byte) []rune {
+	n := utf8.RuneCount(bytes)
+	runes := make([]rune, n)
+	for i := 0; i < n; i++ {
+		r, sz := utf8.DecodeRune(bytes)
+		bytes = bytes[sz:]
+		runes[i] = r
+	}
+	return runes
+}
+
+// runesLen returns the length in bytes of the UTF-8 encoding of runes.
+func runesLen(runes []rune) (len int) {
+	for _, r := range runes {
+		len += utf8.RuneLen(r)
+	}
+	return len
+}
+
+// stringIsASCII reports whether s contains only ASCII.
+// TODO(adonovan): combine when x/tools allows generics.
+func stringIsASCII(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
+
+func bytesIsASCII(s []byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] >= utf8.RuneSelf {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/unified.go b/vendor/golang.org/x/tools/internal/diff/unified.go
new file mode 100644
index 0000000000000..fa376f178727d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/unified.go
@@ -0,0 +1,248 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package diff
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Unified returns a unified diff of the old and new strings.
+// The old and new labels are the names of the old and new files.
+// If the strings are equal, it returns the empty string.
+func Unified(oldLabel, newLabel, old, new string) string {
+	edits := Strings(old, new)
+	unified, err := ToUnified(oldLabel, newLabel, old, edits)
+	if err != nil {
+		// Can't happen: edits are consistent.
+		log.Fatalf("internal error in diff.Unified: %v", err)
+	}
+	return unified
+}
+
+// ToUnified applies the edits to content and returns a unified diff.
+// The old and new labels are the names of the content and result files.
+// It returns an error if the edits are inconsistent; see ApplyEdits.
+func ToUnified(oldLabel, newLabel, content string, edits []Edit) (string, error) {
+	u, err := toUnified(oldLabel, newLabel, content, edits)
+	if err != nil {
+		return "", err
+	}
+	return u.String(), nil
+}
+
+// unified represents a set of edits as a unified diff.
+type unified struct {
+	// From is the name of the original file.
+	From string
+	// To is the name of the modified file.
+	To string
+	// Hunks is the set of edit hunks needed to transform the file content.
+	Hunks []*hunk
+}
+
+// Hunk represents a contiguous set of line edits to apply.
+type hunk struct {
+	// The line in the original source where the hunk starts.
+	FromLine int
+	// The line in the modified source where the hunk starts.
+	ToLine int
+	// The set of line-based edits to apply.
+	Lines []line
+}
+
+// Line represents a single line operation to apply as part of a Hunk.
+type line struct {
+	// Kind is the type of line this represents: deletion, insertion, or copy.
+	Kind OpKind
+	// Content is the content of this line.
+	// For deletion it is the line being removed, for all others it is the line
+	// to put in the output.
+	Content string
+}
+
+// OpKind is used to denote the type of operation a line represents.
+// TODO(adonovan): hide this once the myers package no longer references it.
+type OpKind int
+
+const (
+	// Delete is the operation kind for a line that is present in the input
+	// but not in the output.
+	Delete OpKind = iota
+	// Insert is the operation kind for a line that is new in the output.
+	Insert
+	// Equal is the operation kind for a line that is the same in the input and
+	// output, often used to provide context around edited lines.
+	Equal
+)
+
+// String returns a human-readable representation of an OpKind. It is not
+// intended for machine processing.
+func (k OpKind) String() string {
+	switch k {
+	case Delete:
+		return "delete"
+	case Insert:
+		return "insert"
+	case Equal:
+		return "equal"
+	default:
+		panic("unknown operation kind")
+	}
+}
+
+const (
+	edge = 3
+	gap  = edge * 2
+)
+
+// toUnified takes the contents of a file and a sequence of edits, and
+// calculates a unified diff that represents those edits.
+func toUnified(fromName, toName string, content string, edits []Edit) (unified, error) {
+	u := unified{
+		From: fromName,
+		To:   toName,
+	}
+	if len(edits) == 0 {
+		return u, nil
+	}
+	var err error
+	edits, err = lineEdits(content, edits) // expand to whole lines
+	if err != nil {
+		return u, err
+	}
+	lines := splitLines(content)
+	var h *hunk
+	last := 0
+	toLine := 0
+	for _, edit := range edits {
+		// Compute the zero-based line numbers of the edit start and end.
+		// TODO(adonovan): opt: compute incrementally, avoid O(n^2).
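+		// (Each strings.Count below rescans content from the beginning
+		// for every edit, which is the quadratic behavior the TODO
+		// above refers to.)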
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + toLine += start - last + h = &hunk{ + FromLine: start + 1, + ToLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-edge, start) + h.FromLine -= delta + h.ToLine -= delta + } + last = start + for i := start; i < end; i++ { + h.Lines = append(h.Lines, line{Kind: Delete, Content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.Lines = append(h.Lines, line{Kind: Insert, Content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+edge) + u.Hunks = append(u.Hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.Lines = append(h.Lines, line{Kind: Equal, Content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.Hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.From) + fmt.Fprintf(b, "+++ %s\n", u.To) + for _, hunk := range u.Hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fromCount++ + case Insert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.FromLine, fromCount) + } else if hunk.FromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.FromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.ToLine, toCount) + } else { + fmt.Fprintf(b, " +%d", hunk.ToLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.Lines { + switch l.Kind { + case Delete: + fmt.Fprintf(b, "-%s", l.Content) + case Insert: + fmt.Fprintf(b, "+%s", l.Content) + default: + fmt.Fprintf(b, " %s", l.Content) + } + if !strings.HasSuffix(l.Content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/internal/facts/imports.go b/vendor/golang.org/x/tools/internal/facts/imports.go index a3aa90dd1c588..7b21668660c1c 100644 --- a/vendor/golang.org/x/tools/internal/facts/imports.go +++ b/vendor/golang.org/x/tools/internal/facts/imports.go @@ -25,21 +25,20 @@ import ( // by obtaining it from the internals of the gcexportdata decoder. 
func importMap(imports []*types.Package) map[string]*types.Package { objects := make(map[types.Object]bool) + typs := make(map[types.Type]bool) // Named and TypeParam packages := make(map[string]*types.Package) - var addObj func(obj types.Object) bool + var addObj func(obj types.Object) var addType func(T types.Type) - addObj = func(obj types.Object) bool { + addObj = func(obj types.Object) { if !objects[obj] { objects[obj] = true addType(obj.Type()) if pkg := obj.Pkg(); pkg != nil { packages[pkg.Path()] = pkg } - return true } - return false } addType = func(T types.Type) { @@ -47,8 +46,16 @@ func importMap(imports []*types.Package) map[string]*types.Package { case *types.Basic: // nop case *types.Named: - if addObj(T.Obj()) { - // TODO(taking): Investigate why the Underlying type is not added here. + // Remove infinite expansions of *types.Named by always looking at the origin. + // Some named types with type parameters [that will not type check] have + // infinite expansions: + // type N[T any] struct { F *N[N[T]] } + // importMap() is called on such types when Analyzer.RunDespiteErrors is true. + T = typeparams.NamedTypeOrigin(T).(*types.Named) + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Underlying()) for i := 0; i < T.NumMethods(); i++ { addObj(T.Method(i)) } @@ -102,7 +109,9 @@ func importMap(imports []*types.Package) map[string]*types.Package { addType(T.Term(i).Type()) } case *typeparams.TypeParam: - if addObj(T.Obj()) { + if !typs[T] { + typs[T] = true + addObj(T.Obj()) addType(T.Constraint()) } } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 7d90f00f323a0..ba53cdcdd1073 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" + "golang.org/x/tools/internal/tokeninternal" "golang.org/x/tools/internal/typeparams" ) @@ -138,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver p.doDecl(p.declTodo.popHead()) } + // Produce index of offset of each file record in files. + var files intWriter + var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i + if p.shallow { + fileOffset = make([]uint64, len(p.fileInfos)) + for i, info := range p.fileInfos { + fileOffset[i] = uint64(files.Len()) + p.encodeFile(&files, info.file, info.needed) + } + } + // Append indices to data0 section. dataLen := uint64(p.data0.Len()) w := p.newWriter() @@ -163,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver } hdr.uint64(uint64(p.version)) hdr.uint64(uint64(p.strings.Len())) + if p.shallow { + hdr.uint64(uint64(files.Len())) + hdr.uint64(uint64(len(fileOffset))) + for _, offset := range fileOffset { + hdr.uint64(offset) + } + } hdr.uint64(dataLen) // Flush output. io.Copy(out, &hdr) io.Copy(out, &p.strings) + if p.shallow { + io.Copy(out, &files) + } io.Copy(out, &p.data0) return nil } +// encodeFile writes to w a representation of the file sufficient to +// faithfully restore position information about all needed offsets. +// Mutates the needed array. +func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) { + _ = needed[0] // precondition: needed is non-empty + + w.uint64(p.stringOff(file.Name())) + + size := uint64(file.Size()) + w.uint64(size) + + // Sort the set of needed offsets. Duplicates are harmless. 
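+	// (The sort matters: the line scan below consumes needed[0] in
+	// increasing order, so out-of-order offsets would otherwise be
+	// skipped.)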
+	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+	lines := tokeninternal.GetLines(file) // byte offset of each line start
+	w.uint64(uint64(len(lines)))
+
+	// Rather than record the entire array of line start offsets,
+	// we save only a sparse list of (index, offset) pairs for
+	// the start of each line that contains a needed position.
+	var sparse [][2]int // (index, offset) pairs
+outer:
+	for i, lineStart := range lines {
+		lineEnd := size
+		if i < len(lines)-1 {
+			lineEnd = uint64(lines[i+1])
+		}
+		// Does this line contain a needed offset?
+		if needed[0] < lineEnd {
+			sparse = append(sparse, [2]int{i, lineStart})
+			for needed[0] < lineEnd {
+				needed = needed[1:]
+				if len(needed) == 0 {
+					break outer
+				}
+			}
+		}
+	}
+
+	// Delta-encode the columns.
+	w.uint64(uint64(len(sparse)))
+	var prev [2]int
+	for _, pair := range sparse {
+		w.uint64(uint64(pair[0] - prev[0]))
+		w.uint64(uint64(pair[1] - prev[1]))
+		prev = pair
+	}
+}
+
 // writeIndex writes out an object index. mainIndex indicates whether
 // we're writing out the main index, which is also read by
 // non-compiler tools and includes a complete package description
@@ -255,6 +326,12 @@ type iexporter struct {
 	strings     intWriter
 	stringIndex map[string]uint64
 
+	// In shallow mode, object positions are encoded as (file, offset).
+	// Each file is recorded as a line-number table.
+	// Only the lines of needed positions are saved faithfully.
+	fileInfo  map[*token.File]uint64 // value is index in fileInfos
+	fileInfos []*filePositions
+
 	data0       intWriter
 	declIndex   map[types.Object]uint64
 	tparamNames map[types.Object]string // typeparam->exported name
@@ -263,6 +340,11 @@ type iexporter struct {
 	indent int // for tracing support
 }
 
+type filePositions struct {
+	file   *token.File
+	needed []uint64 // unordered list of needed file offsets
+}
+
 func (p *iexporter) trace(format string, args ...interface{}) {
 	if !trace {
 		// Call sites should also be guarded, but having this check here allows
@@ -286,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 {
 	return off
 }
 
+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+	index, ok := p.fileInfo[file]
+	if !ok {
+		index = uint64(len(p.fileInfo))
+		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+		if p.fileInfo == nil {
+			p.fileInfo = make(map[*token.File]uint64)
+		}
+		p.fileInfo[file] = index
+	}
+	// Record each needed offset.
+	info := p.fileInfos[index]
+	offset := uint64(file.Offset(pos))
+	info.needed = append(info.needed, offset)
+
+	return index, offset
+}
+
 // pushDecl adds n to the declaration work queue, if not already present.
 func (p *iexporter) pushDecl(obj types.Object) {
 	// Package unsafe is known to the compiler and predeclared.
@@ -346,7 +447,13 @@ func (p *iexporter) doDecl(obj types.Object) {
 	case *types.Func:
 		sig, _ := obj.Type().(*types.Signature)
 		if sig.Recv() != nil {
-			panic(internalErrorf("unexpected method: %v", sig))
+			// We shouldn't see methods in the package scope,
+			// but the type checker may repair "func () F() {}"
+			// to "func (Invalid) F()" and then treat it like "func F()",
+			// so allow that. See golang/go#57729.
+			if sig.Recv().Type() != types.Typ[types.Invalid] {
+				panic(internalErrorf("unexpected method: %v", sig))
+			}
 		}
 
 		// Function.
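To make the sparse position encoding above concrete: encodeFile writes each (line index, line offset) pair as a delta against the previous pair, and the importer recovers the pairs with running sums. A minimal standalone sketch of that round trip, with made-up values (illustrative only, not part of the patch):

	package main

	import "fmt"

	func main() {
		// Sparse (lineIndex, lineStart) pairs, as encodeFile would collect them.
		sparse := [][2]int{{3, 40}, {7, 120}, {20, 900}}

		// Encode: emit each pair as a delta against the previous pair.
		var enc []int
		var prev [2]int
		for _, p := range sparse {
			enc = append(enc, p[0]-prev[0], p[1]-prev[1])
			prev = p
		}
		fmt.Println(enc) // [3 40 4 80 13 780]

		// Decode: running sums recover the original pairs.
		var dec [][2]int
		var cur [2]int
		for i := 0; i < len(enc); i += 2 {
			cur[0] += enc[i]
			cur[1] += enc[i+1]
			dec = append(dec, cur)
		}
		fmt.Println(dec) // [[3 40] [7 120] [20 900]]
	}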
@@ -458,13 +565,30 @@ func (w *exportWriter) tag(tag byte) { } func (w *exportWriter) pos(pos token.Pos) { - if w.p.version >= iexportVersionPosCol { + if w.p.shallow { + w.posV2(pos) + } else if w.p.version >= iexportVersionPosCol { w.posV1(pos) } else { w.posV0(pos) } } +// posV2 encoding (used only in shallow mode) records positions as +// (file, offset), where file is the index in the token.File table +// (which records the file name and newline offsets) and offset is a +// byte offset. It effectively ignores //line directives. +func (w *exportWriter) posV2(pos token.Pos) { + if pos == token.NoPos { + w.uint64(0) + return + } + file := w.p.fset.File(pos) // fset must be non-nil + index, offset := w.p.fileIndexAndOffset(file, pos) + w.uint64(1 + index) + w.uint64(offset) +} + func (w *exportWriter) posV1(pos token.Pos) { if w.p.fset == nil { w.int64(0) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index a1c46965350b1..448f903e86a7d 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -137,12 +137,23 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } sLen := int64(r.uint64()) + var fLen int64 + var fileOffset []uint64 + if insert != nil { + // Shallow mode uses a different position encoding. + fLen = int64(r.uint64()) + fileOffset = make([]uint64, r.uint64()) + for i := range fileOffset { + fileOffset[i] = r.uint64() + } + } dLen := int64(r.uint64()) whence, _ := r.Seek(0, io.SeekCurrent) stringData := data[whence : whence+sLen] - declData := data[whence+sLen : whence+sLen+dLen] - r.Seek(sLen+dLen, io.SeekCurrent) + fileData := data[whence+sLen : whence+sLen+fLen] + declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen] + r.Seek(sLen+fLen+dLen, io.SeekCurrent) p := iimporter{ version: int(version), @@ -151,6 +162,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data stringData: stringData, stringCache: make(map[uint64]string), + fileOffset: fileOffset, + fileData: fileData, + fileCache: make([]*token.File, len(fileOffset)), pkgCache: make(map[uint64]*types.Package), declData: declData, @@ -280,6 +294,9 @@ type iimporter struct { stringData []byte stringCache map[uint64]string + fileOffset []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i + fileData []byte + fileCache []*token.File // memoized decoding of file encoded as i pkgCache map[uint64]*types.Package declData []byte @@ -352,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string { return s } +func (p *iimporter) fileAt(index uint64) *token.File { + file := p.fileCache[index] + if file == nil { + off := p.fileOffset[index] + file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath}) + p.fileCache[index] = file + } + return file +} + +func (p *iimporter) decodeFile(rd intReader) *token.File { + filename := p.stringAt(rd.uint64()) + size := int(rd.uint64()) + file := p.fake.fset.AddFile(filename, -1, size) + + // SetLines requires a nondecreasing sequence. + // Because it is common for clients to derive the interval + // [start, start+len(name)] from a start position, and we + // want to ensure that the end offset is on the same line, + // we fill in the gaps of the sparse encoding with values + // that strictly increase by the largest possible amount. + // This allows us to avoid having to record the actual end + // offset of each needed line. 
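+	//
+	// For example (hypothetical values): with size = 100, 8 lines, and a
+	// single needed point (index 3, offset 40), the loops below yield
+	// lines = [0, 38, 39, 40, 96, 97, 98, 99].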
+ + lines := make([]int, int(rd.uint64())) + var index, offset int + for i, n := 0, int(rd.uint64()); i < n; i++ { + index += int(rd.uint64()) + offset += int(rd.uint64()) + lines[index] = offset + + // Ensure monotonicity between points. + for j := index - 1; j > 0 && lines[j] == 0; j-- { + lines[j] = lines[j+1] - 1 + } + } + + // Ensure monotonicity after last point. + for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- { + size-- + lines[j] = size + } + + if !file.SetLines(lines) { + errorf("SetLines failed: %d", lines) // can't happen + } + return file +} + func (p *iimporter) pkgAt(off uint64) *types.Package { if pkg, ok := p.pkgCache[off]; ok { return pkg @@ -645,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { + if r.p.insert != nil { // shallow mode + return r.posv2() + } if r.p.version >= iexportVersionPosCol { r.posv1() } else { @@ -681,6 +750,15 @@ func (r *importReader) posv1() { } } +func (r *importReader) posv2() token.Pos { + file := r.uint64() + if file == 0 { + return token.NoPos + } + tf := r.p.fileAt(file - 1) + return tf.Pos(int(r.uint64())) +} + func (r *importReader) typ() types.Type { return r.p.typAt(r.uint64(), nil) } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 646455802ba2f..642a5ac2d7570 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -799,7 +799,7 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } -var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} +var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"} // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. @@ -869,7 +869,7 @@ func (e *ProcessEnv) init() error { } foundAllRequired := true - for _, k := range RequiredGoEnvVars { + for _, k := range requiredGoEnvVars { if _, ok := e.Env[k]; !ok { foundAllRequired = false break @@ -885,7 +885,7 @@ func (e *ProcessEnv) init() error { } goEnv := map[string]string{} - stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...) + stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, requiredGoEnvVars...)...) 
if err != nil { return err } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 5db9b2d4c7340..31a75949cdc5c 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -10,6 +10,7 @@ var stdlib = map[string][]string{ "archive/tar": { "ErrFieldTooLong", "ErrHeader", + "ErrInsecurePath", "ErrWriteAfterClose", "ErrWriteTooLong", "FileInfoHeader", @@ -45,6 +46,7 @@ var stdlib = map[string][]string{ "ErrAlgorithm", "ErrChecksum", "ErrFormat", + "ErrInsecurePath", "File", "FileHeader", "FileInfoHeader", @@ -87,12 +89,15 @@ var stdlib = map[string][]string{ }, "bytes": { "Buffer", + "Clone", "Compare", "Contains", "ContainsAny", "ContainsRune", "Count", "Cut", + "CutPrefix", + "CutSuffix", "Equal", "EqualFold", "ErrTooLarge", @@ -224,12 +229,15 @@ var stdlib = map[string][]string{ }, "context": { "Background", + "CancelCauseFunc", "CancelFunc", "Canceled", + "Cause", "Context", "DeadlineExceeded", "TODO", "WithCancel", + "WithCancelCause", "WithDeadline", "WithTimeout", "WithValue", @@ -306,6 +314,15 @@ var stdlib = map[string][]string{ "Sign", "Verify", }, + "crypto/ecdh": { + "Curve", + "P256", + "P384", + "P521", + "PrivateKey", + "PublicKey", + "X25519", + }, "crypto/ecdsa": { "GenerateKey", "PrivateKey", @@ -318,6 +335,7 @@ var stdlib = map[string][]string{ "crypto/ed25519": { "GenerateKey", "NewKeyFromSeed", + "Options", "PrivateKey", "PrivateKeySize", "PublicKey", @@ -326,6 +344,7 @@ var stdlib = map[string][]string{ "Sign", "SignatureSize", "Verify", + "VerifyWithOptions", }, "crypto/elliptic": { "Curve", @@ -423,10 +442,12 @@ var stdlib = map[string][]string{ "ConstantTimeEq", "ConstantTimeLessOrEq", "ConstantTimeSelect", + "XORBytes", }, "crypto/tls": { "Certificate", "CertificateRequestInfo", + "CertificateVerificationError", "CipherSuite", "CipherSuiteName", "CipherSuites", @@ -604,6 +625,7 @@ var stdlib = map[string][]string{ "SHA384WithRSAPSS", "SHA512WithRSA", "SHA512WithRSAPSS", + "SetFallbackRoots", "SignatureAlgorithm", "SystemCertPool", "SystemRootsError", @@ -1828,19 +1850,42 @@ var stdlib = map[string][]string{ "R_INFO32", "R_LARCH", "R_LARCH_32", + "R_LARCH_32_PCREL", "R_LARCH_64", + "R_LARCH_ABS64_HI12", + "R_LARCH_ABS64_LO20", + "R_LARCH_ABS_HI20", + "R_LARCH_ABS_LO12", "R_LARCH_ADD16", "R_LARCH_ADD24", "R_LARCH_ADD32", "R_LARCH_ADD64", "R_LARCH_ADD8", + "R_LARCH_B16", + "R_LARCH_B21", + "R_LARCH_B26", "R_LARCH_COPY", + "R_LARCH_GNU_VTENTRY", + "R_LARCH_GNU_VTINHERIT", + "R_LARCH_GOT64_HI12", + "R_LARCH_GOT64_LO20", + "R_LARCH_GOT64_PC_HI12", + "R_LARCH_GOT64_PC_LO20", + "R_LARCH_GOT_HI20", + "R_LARCH_GOT_LO12", + "R_LARCH_GOT_PC_HI20", + "R_LARCH_GOT_PC_LO12", "R_LARCH_IRELATIVE", "R_LARCH_JUMP_SLOT", "R_LARCH_MARK_LA", "R_LARCH_MARK_PCREL", "R_LARCH_NONE", + "R_LARCH_PCALA64_HI12", + "R_LARCH_PCALA64_LO20", + "R_LARCH_PCALA_HI20", + "R_LARCH_PCALA_LO12", "R_LARCH_RELATIVE", + "R_LARCH_RELAX", "R_LARCH_SOP_ADD", "R_LARCH_SOP_AND", "R_LARCH_SOP_ASSERT", @@ -1875,6 +1920,22 @@ var stdlib = map[string][]string{ "R_LARCH_TLS_DTPMOD64", "R_LARCH_TLS_DTPREL32", "R_LARCH_TLS_DTPREL64", + "R_LARCH_TLS_GD_HI20", + "R_LARCH_TLS_GD_PC_HI20", + "R_LARCH_TLS_IE64_HI12", + "R_LARCH_TLS_IE64_LO20", + "R_LARCH_TLS_IE64_PC_HI12", + "R_LARCH_TLS_IE64_PC_LO20", + "R_LARCH_TLS_IE_HI20", + "R_LARCH_TLS_IE_LO12", + "R_LARCH_TLS_IE_PC_HI20", + "R_LARCH_TLS_IE_PC_LO12", + "R_LARCH_TLS_LD_HI20", + "R_LARCH_TLS_LD_PC_HI20", + "R_LARCH_TLS_LE64_HI12", 
+ "R_LARCH_TLS_LE64_LO20", + "R_LARCH_TLS_LE_HI20", + "R_LARCH_TLS_LE_LO12", "R_LARCH_TLS_TPREL32", "R_LARCH_TLS_TPREL64", "R_MIPS", @@ -1938,15 +1999,25 @@ var stdlib = map[string][]string{ "R_PPC64_ADDR16_HIGH", "R_PPC64_ADDR16_HIGHA", "R_PPC64_ADDR16_HIGHER", + "R_PPC64_ADDR16_HIGHER34", "R_PPC64_ADDR16_HIGHERA", + "R_PPC64_ADDR16_HIGHERA34", "R_PPC64_ADDR16_HIGHEST", + "R_PPC64_ADDR16_HIGHEST34", "R_PPC64_ADDR16_HIGHESTA", + "R_PPC64_ADDR16_HIGHESTA34", "R_PPC64_ADDR16_LO", "R_PPC64_ADDR16_LO_DS", "R_PPC64_ADDR24", "R_PPC64_ADDR32", "R_PPC64_ADDR64", "R_PPC64_ADDR64_LOCAL", + "R_PPC64_COPY", + "R_PPC64_D28", + "R_PPC64_D34", + "R_PPC64_D34_HA30", + "R_PPC64_D34_HI30", + "R_PPC64_D34_LO", "R_PPC64_DTPMOD64", "R_PPC64_DTPREL16", "R_PPC64_DTPREL16_DS", @@ -1960,8 +2031,12 @@ var stdlib = map[string][]string{ "R_PPC64_DTPREL16_HIGHESTA", "R_PPC64_DTPREL16_LO", "R_PPC64_DTPREL16_LO_DS", + "R_PPC64_DTPREL34", "R_PPC64_DTPREL64", "R_PPC64_ENTRY", + "R_PPC64_GLOB_DAT", + "R_PPC64_GNU_VTENTRY", + "R_PPC64_GNU_VTINHERIT", "R_PPC64_GOT16", "R_PPC64_GOT16_DS", "R_PPC64_GOT16_HA", @@ -1972,29 +2047,50 @@ var stdlib = map[string][]string{ "R_PPC64_GOT_DTPREL16_HA", "R_PPC64_GOT_DTPREL16_HI", "R_PPC64_GOT_DTPREL16_LO_DS", + "R_PPC64_GOT_DTPREL_PCREL34", + "R_PPC64_GOT_PCREL34", "R_PPC64_GOT_TLSGD16", "R_PPC64_GOT_TLSGD16_HA", "R_PPC64_GOT_TLSGD16_HI", "R_PPC64_GOT_TLSGD16_LO", + "R_PPC64_GOT_TLSGD_PCREL34", "R_PPC64_GOT_TLSLD16", "R_PPC64_GOT_TLSLD16_HA", "R_PPC64_GOT_TLSLD16_HI", "R_PPC64_GOT_TLSLD16_LO", + "R_PPC64_GOT_TLSLD_PCREL34", "R_PPC64_GOT_TPREL16_DS", "R_PPC64_GOT_TPREL16_HA", "R_PPC64_GOT_TPREL16_HI", "R_PPC64_GOT_TPREL16_LO_DS", + "R_PPC64_GOT_TPREL_PCREL34", "R_PPC64_IRELATIVE", "R_PPC64_JMP_IREL", "R_PPC64_JMP_SLOT", "R_PPC64_NONE", + "R_PPC64_PCREL28", + "R_PPC64_PCREL34", + "R_PPC64_PCREL_OPT", + "R_PPC64_PLT16_HA", + "R_PPC64_PLT16_HI", + "R_PPC64_PLT16_LO", "R_PPC64_PLT16_LO_DS", + "R_PPC64_PLT32", + "R_PPC64_PLT64", + "R_PPC64_PLTCALL", + "R_PPC64_PLTCALL_NOTOC", "R_PPC64_PLTGOT16", "R_PPC64_PLTGOT16_DS", "R_PPC64_PLTGOT16_HA", "R_PPC64_PLTGOT16_HI", "R_PPC64_PLTGOT16_LO", "R_PPC64_PLTGOT_LO_DS", + "R_PPC64_PLTREL32", + "R_PPC64_PLTREL64", + "R_PPC64_PLTSEQ", + "R_PPC64_PLTSEQ_NOTOC", + "R_PPC64_PLT_PCREL34", + "R_PPC64_PLT_PCREL34_NOTOC", "R_PPC64_REL14", "R_PPC64_REL14_BRNTAKEN", "R_PPC64_REL14_BRTAKEN", @@ -2002,13 +2098,28 @@ var stdlib = map[string][]string{ "R_PPC64_REL16DX_HA", "R_PPC64_REL16_HA", "R_PPC64_REL16_HI", + "R_PPC64_REL16_HIGH", + "R_PPC64_REL16_HIGHA", + "R_PPC64_REL16_HIGHER", + "R_PPC64_REL16_HIGHER34", + "R_PPC64_REL16_HIGHERA", + "R_PPC64_REL16_HIGHERA34", + "R_PPC64_REL16_HIGHEST", + "R_PPC64_REL16_HIGHEST34", + "R_PPC64_REL16_HIGHESTA", + "R_PPC64_REL16_HIGHESTA34", "R_PPC64_REL16_LO", "R_PPC64_REL24", "R_PPC64_REL24_NOTOC", + "R_PPC64_REL30", "R_PPC64_REL32", "R_PPC64_REL64", "R_PPC64_RELATIVE", + "R_PPC64_SECTOFF", "R_PPC64_SECTOFF_DS", + "R_PPC64_SECTOFF_HA", + "R_PPC64_SECTOFF_HI", + "R_PPC64_SECTOFF_LO", "R_PPC64_SECTOFF_LO_DS", "R_PPC64_TLS", "R_PPC64_TLSGD", @@ -2033,7 +2144,11 @@ var stdlib = map[string][]string{ "R_PPC64_TPREL16_HIGHESTA", "R_PPC64_TPREL16_LO", "R_PPC64_TPREL16_LO_DS", + "R_PPC64_TPREL34", "R_PPC64_TPREL64", + "R_PPC64_UADDR16", + "R_PPC64_UADDR32", + "R_PPC64_UADDR64", "R_PPC_ADDR14", "R_PPC_ADDR14_BRNTAKEN", "R_PPC_ADDR14_BRTAKEN", @@ -2581,6 +2696,9 @@ var stdlib = map[string][]string{ "IMAGE_FILE_MACHINE_POWERPC", "IMAGE_FILE_MACHINE_POWERPCFP", "IMAGE_FILE_MACHINE_R4000", + "IMAGE_FILE_MACHINE_RISCV128", + 
"IMAGE_FILE_MACHINE_RISCV32", + "IMAGE_FILE_MACHINE_RISCV64", "IMAGE_FILE_MACHINE_SH3", "IMAGE_FILE_MACHINE_SH3DSP", "IMAGE_FILE_MACHINE_SH4", @@ -2846,6 +2964,7 @@ var stdlib = map[string][]string{ "errors": { "As", "Is", + "Join", "New", "Unwrap", }, @@ -2916,6 +3035,7 @@ var stdlib = map[string][]string{ "Appendf", "Appendln", "Errorf", + "FormatString", "Formatter", "Fprint", "Fprintf", @@ -3401,6 +3521,7 @@ var stdlib = map[string][]string{ "RecvOnly", "RelativeTo", "Rune", + "Satisfies", "Scope", "Selection", "SelectionKind", @@ -3702,8 +3823,10 @@ var stdlib = map[string][]string{ "LimitedReader", "MultiReader", "MultiWriter", + "NewOffsetWriter", "NewSectionReader", "NopCloser", + "OffsetWriter", "Pipe", "PipeReader", "PipeWriter", @@ -3770,6 +3893,7 @@ var stdlib = map[string][]string{ "ReadDirFile", "ReadFile", "ReadFileFS", + "SkipAll", "SkipDir", "Stat", "StatFS", @@ -4140,6 +4264,7 @@ var stdlib = map[string][]string{ "FlagLoopback", "FlagMulticast", "FlagPointToPoint", + "FlagRunning", "FlagUp", "Flags", "HardwareAddr", @@ -4284,6 +4409,7 @@ var stdlib = map[string][]string{ "NewFileTransport", "NewRequest", "NewRequestWithContext", + "NewResponseController", "NewServeMux", "NoBody", "NotFound", @@ -4303,6 +4429,7 @@ var stdlib = map[string][]string{ "RedirectHandler", "Request", "Response", + "ResponseController", "ResponseWriter", "RoundTripper", "SameSite", @@ -4445,6 +4572,7 @@ var stdlib = map[string][]string{ "NewProxyClientConn", "NewServerConn", "NewSingleHostReverseProxy", + "ProxyRequest", "ReverseProxy", "ServerConn", }, @@ -4476,6 +4604,8 @@ var stdlib = map[string][]string{ "AddrPortFrom", "IPv4Unspecified", "IPv6LinkLocalAllNodes", + "IPv6LinkLocalAllRouters", + "IPv6Loopback", "IPv6Unspecified", "MustParseAddr", "MustParseAddrPort", @@ -4686,6 +4816,7 @@ var stdlib = map[string][]string{ "CommandContext", "ErrDot", "ErrNotFound", + "ErrWaitDelay", "Error", "ExitError", "LookPath", @@ -4734,11 +4865,13 @@ var stdlib = map[string][]string{ "Glob", "HasPrefix", "IsAbs", + "IsLocal", "Join", "ListSeparator", "Match", "Rel", "Separator", + "SkipAll", "SkipDir", "Split", "SplitList", @@ -4860,6 +4993,7 @@ var stdlib = map[string][]string{ "ErrInvalidRepeatOp", "ErrInvalidRepeatSize", "ErrInvalidUTF8", + "ErrLarge", "ErrMissingBracket", "ErrMissingParen", "ErrMissingRepeatArgument", @@ -4968,8 +5102,16 @@ var stdlib = map[string][]string{ }, "runtime/cgo": { "Handle", + "Incomplete", "NewHandle", }, + "runtime/coverage": { + "ClearCounters", + "WriteCounters", + "WriteCountersDir", + "WriteMeta", + "WriteMetaDir", + }, "runtime/debug": { "BuildInfo", "BuildSetting", @@ -5103,6 +5245,8 @@ var stdlib = map[string][]string{ "ContainsRune", "Count", "Cut", + "CutPrefix", + "CutSuffix", "EqualFold", "Fields", "FieldsFunc", @@ -5272,6 +5416,7 @@ var stdlib = map[string][]string{ "AF_TIPC", "AF_UNIX", "AF_UNSPEC", + "AF_UTUN", "AF_VENDOR00", "AF_VENDOR01", "AF_VENDOR02", @@ -5610,20 +5755,25 @@ var stdlib = map[string][]string{ "CLOCAL", "CLONE_CHILD_CLEARTID", "CLONE_CHILD_SETTID", + "CLONE_CLEAR_SIGHAND", "CLONE_CSIGNAL", "CLONE_DETACHED", "CLONE_FILES", "CLONE_FS", + "CLONE_INTO_CGROUP", "CLONE_IO", + "CLONE_NEWCGROUP", "CLONE_NEWIPC", "CLONE_NEWNET", "CLONE_NEWNS", "CLONE_NEWPID", + "CLONE_NEWTIME", "CLONE_NEWUSER", "CLONE_NEWUTS", "CLONE_PARENT", "CLONE_PARENT_SETTID", "CLONE_PID", + "CLONE_PIDFD", "CLONE_PTRACE", "CLONE_SETTLS", "CLONE_SIGHAND", @@ -6166,6 +6316,7 @@ var stdlib = map[string][]string{ "EPROTONOSUPPORT", "EPROTOTYPE", "EPWROFF", + "EQFULL", "ERANGE", 
"EREMCHG", "EREMOTE", @@ -6592,6 +6743,7 @@ var stdlib = map[string][]string{ "F_DUPFD", "F_DUPFD_CLOEXEC", "F_EXLCK", + "F_FINDSIGS", "F_FLUSH_DATA", "F_FREEZE_FS", "F_FSCTL", @@ -6602,6 +6754,7 @@ var stdlib = map[string][]string{ "F_FSPRIV", "F_FSVOID", "F_FULLFSYNC", + "F_GETCODEDIR", "F_GETFD", "F_GETFL", "F_GETLEASE", @@ -6615,6 +6768,7 @@ var stdlib = map[string][]string{ "F_GETPATH_MTMINFO", "F_GETPIPE_SZ", "F_GETPROTECTIONCLASS", + "F_GETPROTECTIONLEVEL", "F_GETSIG", "F_GLOBAL_NOCACHE", "F_LOCK", @@ -6647,6 +6801,7 @@ var stdlib = map[string][]string{ "F_SETLK64", "F_SETLKW", "F_SETLKW64", + "F_SETLKWTIMEOUT", "F_SETLK_REMOTE", "F_SETNOSIGPIPE", "F_SETOWN", @@ -6656,9 +6811,11 @@ var stdlib = map[string][]string{ "F_SETSIG", "F_SETSIZE", "F_SHLCK", + "F_SINGLE_WRITER", "F_TEST", "F_THAW_FS", "F_TLOCK", + "F_TRANSCODEKEY", "F_ULOCK", "F_UNLCK", "F_UNLCKSYS", @@ -7854,12 +8011,20 @@ var stdlib = map[string][]string{ "NOFLSH", "NOTE_ABSOLUTE", "NOTE_ATTRIB", + "NOTE_BACKGROUND", "NOTE_CHILD", + "NOTE_CRITICAL", "NOTE_DELETE", "NOTE_EOF", "NOTE_EXEC", "NOTE_EXIT", "NOTE_EXITSTATUS", + "NOTE_EXIT_CSERROR", + "NOTE_EXIT_DECRYPTFAIL", + "NOTE_EXIT_DETAIL", + "NOTE_EXIT_DETAIL_MASK", + "NOTE_EXIT_MEMORY", + "NOTE_EXIT_REPARENTED", "NOTE_EXTEND", "NOTE_FFAND", "NOTE_FFCOPY", @@ -7868,6 +8033,7 @@ var stdlib = map[string][]string{ "NOTE_FFNOP", "NOTE_FFOR", "NOTE_FORK", + "NOTE_LEEWAY", "NOTE_LINK", "NOTE_LOWAT", "NOTE_NONE", @@ -7946,6 +8112,7 @@ var stdlib = map[string][]string{ "O_CREAT", "O_DIRECT", "O_DIRECTORY", + "O_DP_GETRAWENCRYPTED", "O_DSYNC", "O_EVTONLY", "O_EXCL", @@ -8235,6 +8402,7 @@ var stdlib = map[string][]string{ "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", + "RLIMIT_CPU_USAGE_MONITOR", "RLIMIT_DATA", "RLIMIT_FSIZE", "RLIMIT_NOFILE", @@ -8347,9 +8515,11 @@ var stdlib = map[string][]string{ "RTF_PROTO1", "RTF_PROTO2", "RTF_PROTO3", + "RTF_PROXY", "RTF_REINSTATE", "RTF_REJECT", "RTF_RNH_LOCKED", + "RTF_ROUTER", "RTF_SOURCE", "RTF_SRC", "RTF_STATIC", @@ -8868,6 +9038,7 @@ var stdlib = map[string][]string{ "SO_NO_OFFLOAD", "SO_NP_EXTENSIONS", "SO_NREAD", + "SO_NUMRCVPKT", "SO_NWRITE", "SO_OOBINLINE", "SO_OVERFLOWED", @@ -9037,6 +9208,7 @@ var stdlib = map[string][]string{ "SYS_CREAT", "SYS_CREATE_MODULE", "SYS_CSOPS", + "SYS_CSOPS_AUDITTOKEN", "SYS_DELETE", "SYS_DELETE_MODULE", "SYS_DUP", @@ -9223,6 +9395,7 @@ var stdlib = map[string][]string{ "SYS_JAIL_GET", "SYS_JAIL_REMOVE", "SYS_JAIL_SET", + "SYS_KAS_INFO", "SYS_KDEBUG_TRACE", "SYS_KENV", "SYS_KEVENT", @@ -9250,6 +9423,7 @@ var stdlib = map[string][]string{ "SYS_LCHMOD", "SYS_LCHOWN", "SYS_LCHOWN32", + "SYS_LEDGER", "SYS_LGETFH", "SYS_LGETXATTR", "SYS_LINK", @@ -9346,6 +9520,7 @@ var stdlib = map[string][]string{ "SYS_OPENAT", "SYS_OPENBSD_POLL", "SYS_OPEN_BY_HANDLE_AT", + "SYS_OPEN_DPROTECTED_NP", "SYS_OPEN_EXTENDED", "SYS_OPEN_NOCANCEL", "SYS_OVADVISE", @@ -9978,6 +10153,7 @@ var stdlib = map[string][]string{ "TCP_CONNECTIONTIMEOUT", "TCP_CORK", "TCP_DEFER_ACCEPT", + "TCP_ENABLE_ECN", "TCP_INFO", "TCP_KEEPALIVE", "TCP_KEEPCNT", @@ -10000,11 +10176,13 @@ var stdlib = map[string][]string{ "TCP_NODELAY", "TCP_NOOPT", "TCP_NOPUSH", + "TCP_NOTSENT_LOWAT", "TCP_NSTATES", "TCP_QUICKACK", "TCP_RXT_CONNDROPTIME", "TCP_RXT_FINDROP", "TCP_SACK_ENABLE", + "TCP_SENDMOREACKS", "TCP_SYNCNT", "TCP_VENDOR", "TCP_WINDOW_CLAMP", @@ -10540,6 +10718,8 @@ var stdlib = map[string][]string{ "April", "August", "Date", + "DateOnly", + "DateTime", "December", "Duration", "February", @@ -10594,6 +10774,7 @@ var stdlib = map[string][]string{ "Tick", 
"Ticker", "Time", + "TimeOnly", "Timer", "Tuesday", "UTC", @@ -10892,6 +11073,7 @@ var stdlib = map[string][]string{ "Zs", }, "unicode/utf16": { + "AppendRune", "Decode", "DecodeRune", "Encode", @@ -10920,10 +11102,14 @@ var stdlib = map[string][]string{ "ValidString", }, "unsafe": { + "Add", "Alignof", - "ArbitraryType", "Offsetof", "Pointer", "Sizeof", + "Slice", + "SliceData", + "String", + "StringData", }, } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index 3205c9a16c568..b92e8e6eb3299 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -373,7 +373,7 @@ func (r *Decoder) Int64() int64 { return r.rawVarint() } -// Int64 decodes and returns a uint64 value from the element bitstream. +// Uint64 decodes and returns a uint64 value from the element bitstream. func (r *Decoder) Uint64() uint64 { r.Sync(SyncUint64) return r.rawUvarint() diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index e98e41171e1b6..6482617a4fccd 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -293,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } // Int encodes and writes an int value into the element bitstream. func (w *Encoder) Int(x int) { w.Int64(int64(x)) } -// Len encodes and writes a uint value into the element bitstream. +// Uint encodes and writes a uint value into the element bitstream. func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } // Reloc encodes and writes a relocation for the given (section, diff --git a/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go new file mode 100644 index 0000000000000..949f27816199a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go @@ -0,0 +1,16 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import "syscall" + +// The robustio package is copied from cmd/go/internal/robustio, a package used +// by the go command to retry known flaky operations on certain operating systems. + +//go:generate go run copyfiles.go + +// Since the gopls module cannot access internal/syscall/windows, copy a +// necessary constant. +const ERROR_SHARING_VIOLATION syscall.Errno = 32 diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio.go b/vendor/golang.org/x/tools/internal/robustio/robustio.go new file mode 100644 index 0000000000000..0a559fc9b80c8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio.go @@ -0,0 +1,69 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package robustio wraps I/O functions that are prone to failure on Windows, +// transparently retrying errors up to an arbitrary timeout. +// +// Errors are classified heuristically and retries are bounded, so the functions +// in this package do not completely eliminate spurious errors. However, they do +// significantly reduce the rate of failure in practice. 
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func ReadFile(filename string) ([]byte, error) {
+	return readFile(filename)
+}
+
+// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
+// if an executable file in the directory has recently been executed.
+//
+// (See golang.org/issue/19491.)
+func RemoveAll(path string) error {
+	return removeAll(path)
+}
+
+// IsEphemeralError reports whether err is one of the errors that the functions
+// in this package attempt to mitigate.
+//
+// Errors considered ephemeral include:
+// - syscall.ERROR_ACCESS_DENIED
+// - syscall.ERROR_FILE_NOT_FOUND
+// - internal/syscall/windows.ERROR_SHARING_VIOLATION
+//
+// This set may be expanded in the future; programs must not rely on the
+// non-ephemerality of any given error.
+func IsEphemeralError(err error) bool {
+	return isEphemeralError(err)
+}
+
+// A FileID uniquely identifies a file in the file system.
+//
+// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file
+// names denote the same file.
+// A FileID is comparable, and thus suitable for use as a map key.
+type FileID struct {
+	device, inode uint64
+}
+
+// GetFileID returns the file system's identifier for the file, and its
+// modification time.
+// Like os.Stat, it reads through symbolic links.
+func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) }
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go
new file mode 100644
index 0000000000000..99fd8ebc2fff1
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+	"errors"
+	"syscall"
+)
+
+const errFileNotFound = syscall.ENOENT
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		return errno == errFileNotFound
+	}
+	return false
+}
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go
new file mode 100644
index 0000000000000..c6f997244688d
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +//go:build windows || darwin +// +build windows darwin + +package robustio + +import ( + "errors" + "io/ioutil" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = ioutil.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go new file mode 100644 index 0000000000000..c11dbf9f14b74 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go
new file mode 100644
index 0000000000000..c11dbf9f14b74
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !darwin
+// +build !windows,!darwin
+
+package robustio
+
+import (
+	"io/ioutil"
+	"os"
+)
+
+func rename(oldpath, newpath string) error {
+	return os.Rename(oldpath, newpath)
+}
+
+func readFile(filename string) ([]byte, error) {
+	return ioutil.ReadFile(filename)
+}
+
+func removeAll(path string) error {
+	return os.RemoveAll(path)
+}
+
+func isEphemeralError(err error) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go
new file mode 100644
index 0000000000000..9fa4cacb5a3e0
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build plan9
+// +build plan9
+
+package robustio
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	dir := fi.Sys().(*syscall.Dir)
+	return FileID{
+		device: uint64(dir.Type)<<32 | uint64(dir.Dev),
+		inode:  dir.Qid.Path,
+	}, fi.ModTime(), nil
+}
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go
new file mode 100644
index 0000000000000..8aa13d027867a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go
@@ -0,0 +1,28 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !windows && !plan9
+// +build !windows,!plan9
+
+// TODO(adonovan): use 'unix' tag when go1.19 can be assumed.
+
+package robustio
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func getFileID(filename string) (FileID, time.Time, error) {
+	fi, err := os.Stat(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	stat := fi.Sys().(*syscall.Stat_t)
+	return FileID{
+		device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux)
+		inode:  stat.Ino,
+	}, fi.ModTime(), nil
+}
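Reviewer note: on POSIX and Plan 9, file identity comes from os.Stat alone — two names yield the same FileID exactly when they share a device and inode, which is how hard links to the same file are detected. A Unix-only sketch of that check using the same syscall.Stat_t fields (the fileID helper and file names are made up for the demo):

```go
//go:build !windows && !plan9

package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

// fileID mirrors the POSIX getFileID above: (device, inode) identifies a
// file independently of its name.
func fileID(name string) (dev, ino uint64, err error) {
	fi, err := os.Stat(name)
	if err != nil {
		return 0, 0, err
	}
	st := fi.Sys().(*syscall.Stat_t)
	return uint64(st.Dev), uint64(st.Ino), nil
}

func main() {
	if err := os.WriteFile("orig.txt", []byte("x"), 0o644); err != nil {
		log.Fatal(err)
	}
	defer os.Remove("orig.txt")
	if err := os.Link("orig.txt", "alias.txt"); err != nil {
		log.Fatal(err)
	}
	defer os.Remove("alias.txt")

	d1, i1, _ := fileID("orig.txt")
	d2, i2, _ := fileID("alias.txt")
	// A hard link shares the original's device and inode, so the IDs match.
	fmt.Println("same file:", d1 == d2 && i1 == i2) // true
}
```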
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go
new file mode 100644
index 0000000000000..616c32883d602
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go
@@ -0,0 +1,51 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import (
+	"errors"
+	"syscall"
+	"time"
+)
+
+const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
+
+// isEphemeralError returns true if err may be resolved by waiting.
+func isEphemeralError(err error) bool {
+	var errno syscall.Errno
+	if errors.As(err, &errno) {
+		switch errno {
+		case syscall.ERROR_ACCESS_DENIED,
+			syscall.ERROR_FILE_NOT_FOUND,
+			ERROR_SHARING_VIOLATION:
+			return true
+		}
+	}
+	return false
+}
+
+// Note: it may be convenient to have this helper return fs.FileInfo, but
+// implementing this is actually quite involved on Windows. Since we only
+// currently use mtime, keep it simple.
+func getFileID(filename string) (FileID, time.Time, error) {
+	filename16, err := syscall.UTF16PtrFromString(filename)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0)
+	if err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	defer syscall.CloseHandle(h)
+	var i syscall.ByHandleFileInformation
+	if err := syscall.GetFileInformationByHandle(h, &i); err != nil {
+		return FileID{}, time.Time{}, err
+	}
+	mtime := time.Unix(0, i.LastWriteTime.Nanoseconds())
+	return FileID{
+		device: uint64(i.VolumeSerialNumber),
+		inode:  uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow),
+	}, mtime, nil
+}
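Reviewer note: both the Windows and Plan 9 implementations pack two 32-bit identifiers (FileIndexHigh/FileIndexLow, or Type/Dev) into the single uint64 inode field with a shift-and-or. A tiny sketch of that packing, with hypothetical helper names:

```go
package main

import "fmt"

// pack64 combines two 32-bit halves the same way getFileID does on Windows
// (FileIndexHigh<<32 | FileIndexLow) and on Plan 9 (Type<<32 | Dev).
func pack64(hi, lo uint32) uint64 {
	return uint64(hi)<<32 | uint64(lo)
}

// unpack64 recovers the original halves.
func unpack64(v uint64) (hi, lo uint32) {
	return uint32(v >> 32), uint32(v)
}

func main() {
	id := pack64(0x00000001, 0xdeadbeef)
	fmt.Printf("packed: %#018x\n", id) // 0x00000001deadbeef
	hi, lo := unpack64(id)
	fmt.Printf("hi=%#x lo=%#x\n", hi, lo) // hi=0x1 lo=0xdeadbeef
}
```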
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
new file mode 100644
index 0000000000000..a3fb2d4f29d0f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -0,0 +1,59 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tokeninternal provides access to some internal features of the
+// token package.
+package tokeninternal
+
+import (
+	"go/token"
+	"sync"
+	"unsafe"
+)
+
+// GetLines returns the table of line-start offsets from a token.File.
+func GetLines(file *token.File) []int {
+	// token.File has a Lines method on Go 1.21 and later.
+	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
+		return file.Lines()
+	}
+
+	// This declaration must match that of token.File.
+	// This creates a risk of dependency skew.
+	// For now we check that the size of the two
+	// declarations is the same, on the (fragile) assumption
+	// that future changes would add fields.
+	type tokenFile119 struct {
+		_     string
+		_     int
+		_     int
+		mu    sync.Mutex // we're not complete monsters
+		lines []int
+		_     []struct{}
+	}
+	type tokenFile118 struct {
+		_ *token.FileSet // deleted in go1.19
+		tokenFile119
+	}
+
+	type uP = unsafe.Pointer
+	switch unsafe.Sizeof(*file) {
+	case unsafe.Sizeof(tokenFile118{}):
+		var ptr *tokenFile118
+		*(*uP)(uP(&ptr)) = uP(file)
+		ptr.mu.Lock()
+		defer ptr.mu.Unlock()
+		return ptr.lines
+
+	case unsafe.Sizeof(tokenFile119{}):
+		var ptr *tokenFile119
+		*(*uP)(uP(&ptr)) = uP(file)
+		ptr.mu.Lock()
+		defer ptr.mu.Unlock()
+		return ptr.lines
+
+	default:
+		panic("unexpected token.File size")
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c814c8714fe47..831cc61670a97 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -978,12 +978,12 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
 golang.org/x/exp/constraints
 golang.org/x/exp/maps
 golang.org/x/exp/slices
-# golang.org/x/mod v0.7.0
+# golang.org/x/mod v0.8.0
 ## explicit; go 1.17
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.5.0
+# golang.org/x/net v0.6.0
 ## explicit; go 1.17
 golang.org/x/net/bpf
 golang.org/x/net/context
@@ -1025,7 +1025,7 @@ golang.org/x/sys/windows/registry
 # golang.org/x/term v0.5.0
 ## explicit; go 1.17
 golang.org/x/term
-# golang.org/x/text v0.6.0
+# golang.org/x/text v0.7.0
 ## explicit; go 1.17
 golang.org/x/text/encoding
 golang.org/x/text/encoding/charmap
@@ -1050,7 +1050,7 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# golang.org/x/tools v0.5.0
+# golang.org/x/tools v0.6.0
 ## explicit; go 1.18
 golang.org/x/tools/cmd/goimports
 golang.org/x/tools/cover
@@ -1067,6 +1067,8 @@ golang.org/x/tools/go/internal/packagesdriver
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
 golang.org/x/tools/imports
+golang.org/x/tools/internal/diff
+golang.org/x/tools/internal/diff/lcs
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
 golang.org/x/tools/internal/event/keys
@@ -1079,6 +1081,8 @@ golang.org/x/tools/internal/gopathwalk
 golang.org/x/tools/internal/imports
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
+golang.org/x/tools/internal/robustio
+golang.org/x/tools/internal/tokeninternal
 golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 # golang.zx2c4.com/wireguard v0.0.0-20211017052713-f87e87af0d9a
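Reviewer note on tokeninternal.GetLines: on Go 1.21 and later the interface assertion at the top of the function succeeds and the unsafe size-switch fallback is never reached. A small sketch of that probe against a real token.File (illustrative only):

```go
package main

import (
	"fmt"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := []byte("package main\n\nfunc main() {}\n")
	f := fset.AddFile("example.go", fset.Base(), len(src))
	f.SetLinesForContent(src) // compute the line table from the source bytes

	// The same probe GetLines uses: on Go 1.21+, token.File exposes Lines
	// directly, so no unsafe field access is needed.
	if file, ok := (interface{})(f).(interface{ Lines() []int }); ok {
		fmt.Println("line starts:", file.Lines()) // e.g. [0 13 14]
	} else {
		fmt.Println("Lines() unavailable; GetLines would fall back to unsafe")
	}
}
```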