From 8dc70e91ea4b216d641e753b213a26f12d261e7b Mon Sep 17 00:00:00 2001
From: Igor Sirotin
Date: Wed, 23 Oct 2024 12:35:07 +0100
Subject: [PATCH 1/7] fix_: functional tests (#5979)

* fix_: generate on test-functional

* chore(test)_: fix functional test assertion

---------

Co-authored-by: Siddarth Kumar
---
 Makefile                              | 1 +
 tests-functional/tests/test_router.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index 42ce5451816..3804269da33 100644
--- a/Makefile
+++ b/Makefile
@@ -398,6 +398,7 @@ test-e2e: ##@tests Run e2e tests
 test-e2e-race: export GOTEST_EXTRAFLAGS=-race
 test-e2e-race: test-e2e ##@tests Run e2e tests with -race flag
 
+test-functional: generate
 test-functional: export FUNCTIONAL_TESTS_DOCKER_UID ?= $(call sh, id -u)
 test-functional: export FUNCTIONAL_TESTS_REPORT_CODECOV ?= false
 test-functional:
diff --git a/tests-functional/tests/test_router.py b/tests-functional/tests/test_router.py
index bc5008422f3..5af0fa508c3 100644
--- a/tests-functional/tests/test_router.py
+++ b/tests-functional/tests/test_router.py
@@ -111,5 +111,5 @@ def test_tx_from_route(self):
         tx_details = response.json()["result"]
 
         assert tx_details["value"] == amount_in
-        assert tx_details["to"] == user_2.address
-        assert tx_details["from"] == user_1.address
+        assert tx_details["to"].upper() == user_2.address.upper()
+        assert tx_details["from"].upper() == user_1.address.upper()

From 132ea05fc8735b91922a440009efdbc7f1b8b8ce Mon Sep 17 00:00:00 2001
From: Jonathan Rainville
Date: Wed, 23 Oct 2024 09:53:25 -0400
Subject: [PATCH 2/7] feat(accounts)_: cherry-pick Persist acceptance of Terms
 of Use & Privacy policy (#5766) (#5977)

* feat(accounts)_: Persist acceptance of Terms of Use & Privacy policy (#5766)

The original GH issue https://github.com/status-im/status-mobile/issues/21113
came from a request from the Legal team. We must show Status v1 users the new
terms (Terms of Use & Privacy Policy) right after they upgrade to Status v2
from the stores.

The solution we use is a flag in the accounts table, named hasAcceptedTerms.
The flag is set to true on the first account ever created in v2, and we
provide a native call in mobile/status.go#AcceptTerms, which allows the
client to persist the user's choice in case they are upgrading (from v1 ->
v2, or from a v2 older than this PR).

This solution is not ideal, because the setting should be stored in a
separate table rather than in the accounts table.
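For illustration only, a minimal self-contained sketch of that rule (the
account type and buildAccount below are stand-ins, not the real
multiaccounts.Account or backend code):

    package main

    import "fmt"

    type account struct {
        KeyUID           string
        HasAcceptedTerms bool
    }

    // Only the first account ever created in v2 gets the flag set
    // automatically; for every other account the client must persist
    // the user's choice explicitly via AcceptTerms.
    func buildAccount(existingAccounts int) account {
        acc := account{KeyUID: "0x..."} // placeholder key UID
        if existingAccounts == 0 {
            acc.HasAcceptedTerms = true
        }
        return acc
    }

    func main() {
        fmt.Println(buildAccount(0).HasAcceptedTerms) // true: fresh v2 install
        fmt.Println(buildAccount(1).HasAcceptedTerms) // false: must call AcceptTerms
    }

The real implementation keys this off multiaccountsDB.GetAccountsCount(), as
shown in the api/geth_backend.go hunk below.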
Related Mobile PR https://github.com/status-im/status-mobile/pull/21124 * fix(test)_: Compare addresses using uppercased strings --------- Co-authored-by: Icaro Motta --- api/backend_test.go | 25 +++ api/geth_backend.go | 26 +++ ...obile_user_upgrading_from_v1_to_v2_test.go | 5 + mobile/status.go | 9 + multiaccounts/database.go | 24 ++- multiaccounts/database_test.go | 175 +++++++++++++++++- ..._add_has_accepted_terms_to_accounts.up.sql | 1 + 7 files changed, 255 insertions(+), 10 deletions(-) create mode 100644 multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql diff --git a/api/backend_test.go b/api/backend_test.go index 3a40cee0e40..b65845b5f9f 100644 --- a/api/backend_test.go +++ b/api/backend_test.go @@ -855,6 +855,7 @@ func TestLoginAccount(t *testing.T) { acc, err := b.CreateAccountAndLogin(createAccountRequest) require.NoError(t, err) require.Equal(t, nameserver, b.config.WakuV2Config.Nameserver) + require.True(t, acc.HasAcceptedTerms) waitForLogin(c) require.NoError(t, b.Logout()) @@ -1684,6 +1685,30 @@ func TestRestoreAccountAndLoginWithoutDisplayName(t *testing.T) { require.NotEmpty(t, account.Name) } +func TestAcceptTerms(t *testing.T) { + tmpdir := t.TempDir() + b := NewGethStatusBackend() + conf, err := params.NewNodeConfig(tmpdir, 1777) + require.NoError(t, err) + require.NoError(t, b.AccountManager().InitKeystore(conf.KeyStoreDir)) + b.UpdateRootDataDir(conf.DataDir) + require.NoError(t, b.OpenAccounts()) + nameserver := "8.8.8.8" + createAccountRequest := &requests.CreateAccount{ + DisplayName: "some-display-name", + CustomizationColor: "#ffffff", + Password: "some-password", + RootDataDir: tmpdir, + LogFilePath: tmpdir + "/log", + WakuV2Nameserver: &nameserver, + WakuV2Fleet: "status.staging", + } + _, err = b.CreateAccountAndLogin(createAccountRequest) + require.NoError(t, err) + err = b.AcceptTerms() + require.NoError(t, err) +} + func TestCreateAccountPathsValidation(t *testing.T) { tmpdir := t.TempDir() diff --git a/api/geth_backend.go b/api/geth_backend.go index 1be579c3640..c76d2b5eb5a 100644 --- a/api/geth_backend.go +++ b/api/geth_backend.go @@ -238,6 +238,24 @@ func (b *GethStatusBackend) GetAccounts() ([]multiaccounts.Account, error) { return b.multiaccountsDB.GetAccounts() } +func (b *GethStatusBackend) AcceptTerms() error { + b.mu.Lock() + defer b.mu.Unlock() + if b.multiaccountsDB == nil { + return errors.New("accounts db wasn't initialized") + } + + accounts, err := b.multiaccountsDB.GetAccounts() + if err != nil { + return err + } + if len(accounts) == 0 { + return errors.New("accounts is empty") + } + + return b.multiaccountsDB.UpdateHasAcceptedTerms(accounts[0].KeyUID, true) +} + func (b *GethStatusBackend) getAccountByKeyUID(keyUID string) (*multiaccounts.Account, error) { b.mu.Lock() defer b.mu.Unlock() @@ -1580,6 +1598,14 @@ func (b *GethStatusBackend) buildAccount(request *requests.CreateAccount, input acc.KDFIterations = dbsetup.ReducedKDFIterationsNumber } + count, err := b.multiaccountsDB.GetAccountsCount() + if err != nil { + return nil, err + } + if count == 0 { + acc.HasAcceptedTerms = true + } + if request.ImagePath != "" { imageCropRectangle := request.ImageCropRectangle if imageCropRectangle == nil { diff --git a/api/old_mobile_user_upgrading_from_v1_to_v2_test.go b/api/old_mobile_user_upgrading_from_v1_to_v2_test.go index 58eb52acf99..8f651df7d3e 100644 --- a/api/old_mobile_user_upgrading_from_v1_to_v2_test.go +++ b/api/old_mobile_user_upgrading_from_v1_to_v2_test.go @@ -141,6 +141,11 @@ func (s 
*OldMobileUserUpgradingFromV1ToV2Test) TestLoginAndMigrationsStillWorkWi s.Require().True(len(keyKps[0].Accounts) == 1) info, err = generator.LoadAccount(keyKps[0].Accounts[0].Address.Hex(), oldMobileUserPasswd) s.Require().NoError(err) + + // The user should manually accept terms, so we make sure we don't set it + // automatically by mistake. + s.Require().False(info.ToMultiAccount().HasAcceptedTerms) + s.Require().Equal(keyKps[0].KeyUID, info.KeyUID) s.Require().Equal(keyKps[0].Accounts[0].KeyUID, info.KeyUID) info, err = generator.ImportPrivateKey("c3ad0b50652318f845565c13761e5369ce75dcbc2a94616e15b829d4b07410fe") diff --git a/mobile/status.go b/mobile/status.go index a121d85b4df..4c90c64002c 100644 --- a/mobile/status.go +++ b/mobile/status.go @@ -443,6 +443,15 @@ func createAccountAndLogin(requestJSON string) string { return makeJSONResponse(nil) } +func AcceptTerms() string { + return callWithResponse(acceptTerms) +} + +func acceptTerms() string { + err := statusBackend.AcceptTerms() + return makeJSONResponse(err) +} + func LoginAccount(requestJSON string) string { return callWithResponse(loginAccount, requestJSON) } diff --git a/multiaccounts/database.go b/multiaccounts/database.go index f3b427409f1..799acda1f80 100644 --- a/multiaccounts/database.go +++ b/multiaccounts/database.go @@ -29,6 +29,9 @@ type Account struct { Images []images.IdentityImage `json:"images"` KDFIterations int `json:"kdfIterations,omitempty"` CustomizationColorClock uint64 `json:"-"` + + // HasAcceptedTerms will be set to true when the first account is created. + HasAcceptedTerms bool `json:"hasAcceptedTerms"` } func (a *Account) RefersToKeycard() bool { @@ -145,7 +148,7 @@ func (db *Database) GetAccountKDFIterationsNumber(keyUID string) (kdfIterationsN } func (db *Database) GetAccounts() (rst []Account, err error) { - rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid ORDER BY loginTimestamp DESC") + rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, a.hasAcceptedTerms, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid ORDER BY loginTimestamp DESC") if err != nil { return nil, err } @@ -179,6 +182,7 @@ func (db *Database) GetAccounts() (rst []Account, err error) { &acc.KeycardPairing, &acc.KeyUID, &acc.KDFIterations, + &acc.HasAcceptedTerms, &iiName, &ii.Payload, &iiWidth, @@ -236,8 +240,14 @@ func (db *Database) GetAccounts() (rst []Account, err error) { return rst, nil } +func (db *Database) GetAccountsCount() (int, error) { + var count int + err := db.db.QueryRow("SELECT COUNT(1) FROM accounts").Scan(&count) + return count, err +} + func (db *Database) GetAccount(keyUID string) (*Account, error) { - rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, ii.key_uid, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = 
a.keyUid WHERE a.keyUid = ? ORDER BY loginTimestamp DESC", keyUID) + rows, err := db.db.Query("SELECT a.name, a.loginTimestamp, a.identicon, a.colorHash, a.colorId, a.customizationColor, a.customizationColorClock, a.keycardPairing, a.keyUid, a.kdfIterations, a.hasAcceptedTerms, ii.key_uid, ii.name, ii.image_payload, ii.width, ii.height, ii.file_size, ii.resize_target, ii.clock FROM accounts AS a LEFT JOIN identity_images AS ii ON ii.key_uid = a.keyUid WHERE a.keyUid = ? ORDER BY loginTimestamp DESC", keyUID) if err != nil { return nil, err } @@ -273,6 +283,7 @@ func (db *Database) GetAccount(keyUID string) (*Account, error) { &acc.KeycardPairing, &acc.KeyUID, &acc.KDFIterations, + &acc.HasAcceptedTerms, &iiKeyUID, &iiName, &ii.Payload, @@ -323,7 +334,7 @@ func (db *Database) SaveAccount(account Account) error { account.KDFIterations = dbsetup.ReducedKDFIterationsNumber } - _, err = db.db.Exec("INSERT OR REPLACE INTO accounts (name, identicon, colorHash, colorId, customizationColor, customizationColorClock, keycardPairing, keyUid, kdfIterations, loginTimestamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KeyUID, account.KDFIterations, account.Timestamp) + _, err = db.db.Exec("INSERT OR REPLACE INTO accounts (name, identicon, colorHash, colorId, customizationColor, customizationColorClock, keycardPairing, keyUid, kdfIterations, loginTimestamp, hasAcceptedTerms) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KeyUID, account.KDFIterations, account.Timestamp, account.HasAcceptedTerms) if err != nil { return err } @@ -340,6 +351,11 @@ func (db *Database) UpdateDisplayName(keyUID string, displayName string) error { return err } +func (db *Database) UpdateHasAcceptedTerms(keyUID string, hasAcceptedTerms bool) error { + _, err := db.db.Exec("UPDATE accounts SET hasAcceptedTerms = ? WHERE keyUid = ?", hasAcceptedTerms, keyUID) + return err +} + func (db *Database) UpdateAccount(account Account) error { colorHash, err := json.Marshal(account.ColorHash) if err != nil { @@ -350,7 +366,7 @@ func (db *Database) UpdateAccount(account Account) error { account.KDFIterations = dbsetup.ReducedKDFIterationsNumber } - _, err = db.db.Exec("UPDATE accounts SET name = ?, identicon = ?, colorHash = ?, colorId = ?, customizationColor = ?, customizationColorClock = ?, keycardPairing = ?, kdfIterations = ? WHERE keyUid = ?", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KDFIterations, account.KeyUID) + _, err = db.db.Exec("UPDATE accounts SET name = ?, identicon = ?, colorHash = ?, colorId = ?, customizationColor = ?, customizationColorClock = ?, keycardPairing = ?, kdfIterations = ?, hasAcceptedTerms = ? 
WHERE keyUid = ?", account.Name, account.Identicon, colorHash, account.ColorID, account.CustomizationColor, account.CustomizationColorClock, account.KeycardPairing, account.KDFIterations, account.HasAcceptedTerms, account.KeyUID) return err } diff --git a/multiaccounts/database_test.go b/multiaccounts/database_test.go index a4cdce33dba..a6647ea6c0a 100644 --- a/multiaccounts/database_test.go +++ b/multiaccounts/database_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "io/ioutil" "os" + "strings" "testing" "github.com/status-im/status-go/common/dbsetup" @@ -39,10 +40,17 @@ func TestAccounts(t *testing.T) { func TestAccountsUpdate(t *testing.T) { db, stop := setupTestDB(t) defer stop() - expected := Account{KeyUID: "string", CustomizationColor: common.CustomizationColorBlue, ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, ColorID: 10, KDFIterations: dbsetup.ReducedKDFIterationsNumber} + expected := Account{ + KeyUID: "string", + CustomizationColor: common.CustomizationColorBlue, + ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, + ColorID: 10, + KDFIterations: dbsetup.ReducedKDFIterationsNumber, + } require.NoError(t, db.SaveAccount(expected)) expected.Name = "chars" expected.CustomizationColor = common.CustomizationColorMagenta + expected.HasAcceptedTerms = true require.NoError(t, db.UpdateAccount(expected)) rst, err := db.GetAccounts() require.NoError(t, err) @@ -50,6 +58,53 @@ func TestAccountsUpdate(t *testing.T) { require.Equal(t, expected, rst[0]) } +func TestUpdateHasAcceptedTerms(t *testing.T) { + db, stop := setupTestDB(t) + defer stop() + keyUID := "string" + expected := Account{ + KeyUID: keyUID, + KDFIterations: dbsetup.ReducedKDFIterationsNumber, + } + require.NoError(t, db.SaveAccount(expected)) + accounts, err := db.GetAccounts() + require.NoError(t, err) + require.Equal(t, []Account{expected}, accounts) + + // Update from false -> true + require.NoError(t, db.UpdateHasAcceptedTerms(keyUID, true)) + account, err := db.GetAccount(keyUID) + require.NoError(t, err) + expected.HasAcceptedTerms = true + require.Equal(t, &expected, account) + + // Update from true -> false + require.NoError(t, db.UpdateHasAcceptedTerms(keyUID, false)) + account, err = db.GetAccount(keyUID) + require.NoError(t, err) + expected.HasAcceptedTerms = false + require.Equal(t, &expected, account) +} + +func TestDatabase_GetAccountsCount(t *testing.T) { + db, stop := setupTestDB(t) + defer stop() + + count, err := db.GetAccountsCount() + require.NoError(t, err) + require.Equal(t, 0, count) + + account := Account{ + KeyUID: keyUID, + KDFIterations: dbsetup.ReducedKDFIterationsNumber, + } + require.NoError(t, db.SaveAccount(account)) + + count, err = db.GetAccountsCount() + require.NoError(t, err) + require.Equal(t, 1, count) +} + func TestLoginUpdate(t *testing.T) { db, stop := setupTestDB(t) defer stop() @@ -148,20 +203,26 @@ func TestDatabase_DeleteIdentityImage(t *testing.T) { require.Empty(t, oii) } +func removeAllWhitespace(s string) string { + tmp := strings.ReplaceAll(s, " ", "") + tmp = strings.ReplaceAll(tmp, "\n", "") + tmp = strings.ReplaceAll(tmp, "\t", "") + return tmp +} + func TestDatabase_GetAccountsWithIdentityImages(t *testing.T) { db, stop := setupTestDB(t) defer stop() testAccs := []Account{ - {Name: "string", KeyUID: keyUID, Identicon: "data"}, + {Name: "string", KeyUID: keyUID, Identicon: "data", HasAcceptedTerms: true}, {Name: "string", KeyUID: keyUID2}, {Name: "string", KeyUID: keyUID2 + "2"}, {Name: "string", KeyUID: keyUID2 + "3"}, } - expected := 
`[{"name":"string","timestamp":100,"identicon":"data","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0xdeadbeef","images":[{"keyUid":"0xdeadbeef","type":"large","uri":"data:image/png;base64,iVBORw0KGgoAAAANSUg=","width":240,"height":300,"fileSize":1024,"resizeTarget":240,"clock":0},{"keyUid":"0xdeadbeef","type":"thumbnail","uri":"data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=","width":80,"height":80,"fileSize":256,"resizeTarget":80,"clock":0}],"kdfIterations":3200},{"name":"string","timestamp":10,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef","images":null,"kdfIterations":3200},{"name":"string","timestamp":0,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef2","images":null,"kdfIterations":3200},{"name":"string","timestamp":0,"identicon":"","colorHash":null,"colorId":0,"keycard-pairing":"","key-uid":"0x1337beef3","images":[{"keyUid":"0x1337beef3","type":"large","uri":"data:image/png;base64,iVBORw0KGgoAAAANSUg=","width":240,"height":300,"fileSize":1024,"resizeTarget":240,"clock":0},{"keyUid":"0x1337beef3","type":"thumbnail","uri":"data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=","width":80,"height":80,"fileSize":256,"resizeTarget":80,"clock":0}],"kdfIterations":3200}]` for _, a := range testAccs { - require.NoError(t, db.SaveAccount(a)) + require.NoError(t, db.SaveAccount(a), a.KeyUID) } seedTestDBWithIdentityImages(t, db, keyUID) @@ -178,14 +239,116 @@ func TestDatabase_GetAccountsWithIdentityImages(t *testing.T) { accJSON, err := json.Marshal(accs) require.NoError(t, err) - require.Exactly(t, expected, string(accJSON)) + expected := ` +[ + { + "name": "string", + "timestamp": 100, + "identicon": "data", + "colorHash": null, + "colorId": 0, + "keycard-pairing": "", + "key-uid": "0xdeadbeef", + "images": [ + { + "keyUid": "0xdeadbeef", + "type": "large", + "uri": "data:image/png;base64,iVBORw0KGgoAAAANSUg=", + "width": 240, + "height": 300, + "fileSize": 1024, + "resizeTarget": 240, + "clock": 0 + }, + { + "keyUid": "0xdeadbeef", + "type": "thumbnail", + "uri": "data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=", + "width": 80, + "height": 80, + "fileSize": 256, + "resizeTarget": 80, + "clock": 0 + } + ], + "kdfIterations": 3200, + "hasAcceptedTerms": true + }, + { + "name": "string", + "timestamp": 10, + "identicon": "", + "colorHash": null, + "colorId": 0, + "keycard-pairing": "", + "key-uid": "0x1337beef", + "images": null, + "kdfIterations": 3200, + "hasAcceptedTerms": false + }, + { + "name": "string", + "timestamp": 0, + "identicon": "", + "colorHash": null, + "colorId": 0, + "keycard-pairing": "", + "key-uid": "0x1337beef2", + "images": null, + "kdfIterations": 3200, + "hasAcceptedTerms": false + }, + { + "name": "string", + "timestamp": 0, + "identicon": "", + "colorHash": null, + "colorId": 0, + "keycard-pairing": "", + "key-uid": "0x1337beef3", + "images": [ + { + "keyUid": "0x1337beef3", + "type": "large", + "uri": "data:image/png;base64,iVBORw0KGgoAAAANSUg=", + "width": 240, + "height": 300, + "fileSize": 1024, + "resizeTarget": 240, + "clock": 0 + }, + { + "keyUid": "0x1337beef3", + "type": "thumbnail", + "uri": "data:image/jpeg;base64,/9j/2wCEAFA3PEY8MlA=", + "width": 80, + "height": 80, + "fileSize": 256, + "resizeTarget": 80, + "clock": 0 + } + ], + "kdfIterations": 3200, + "hasAcceptedTerms": false + } +] +` + + require.Exactly(t, removeAllWhitespace(expected), string(accJSON)) } func TestDatabase_GetAccount(t *testing.T) { db, stop := setupTestDB(t) defer stop() - expected := Account{Name: 
"string", KeyUID: keyUID, ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, ColorID: 10, KDFIterations: dbsetup.ReducedKDFIterationsNumber} + expected := Account{ + Name: "string", + KeyUID: keyUID, + ColorHash: ColorHash{{4, 3}, {4, 0}, {4, 3}, {4, 0}}, + ColorID: 10, + KDFIterations: dbsetup.ReducedKDFIterationsNumber, + HasAcceptedTerms: true, + } require.NoError(t, db.SaveAccount(expected)) account, err := db.GetAccount(expected.KeyUID) diff --git a/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql b/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql new file mode 100644 index 00000000000..d346f880297 --- /dev/null +++ b/multiaccounts/migrations/sql/1724407149_add_has_accepted_terms_to_accounts.up.sql @@ -0,0 +1 @@ +ALTER TABLE accounts ADD COLUMN hasAcceptedTerms BOOLEAN NOT NULL DEFAULT FALSE; From 055533125220dd7e7fd78ccf58c521ab322aba61 Mon Sep 17 00:00:00 2001 From: Yevheniia Berdnyk Date: Wed, 23 Oct 2024 22:48:33 +0300 Subject: [PATCH 3/7] test_: restore account (#5960) --- protocol/requests/create_account.go | 2 +- tests-functional/clients/status_backend.py | 56 +++++++++++++++++-- tests-functional/conftest.py | 2 +- tests-functional/constants.py | 7 ++- tests-functional/pytest.ini | 2 + .../schemas/wallet_getEthereumChains | 2 +- tests-functional/tests/test_accounts.py | 4 +- tests-functional/tests/test_cases.py | 34 +++++------ .../tests/test_init_status_app.py | 4 +- tests-functional/tests/test_waku_rpc.py | 11 ++-- tests-functional/tests/test_wallet_rpc.py | 4 +- 11 files changed, 85 insertions(+), 43 deletions(-) diff --git a/protocol/requests/create_account.go b/protocol/requests/create_account.go index d1aa4555c07..45a0e87281b 100644 --- a/protocol/requests/create_account.go +++ b/protocol/requests/create_account.go @@ -66,7 +66,7 @@ type CreateAccount struct { // If you want to use non-default network, use NetworkID. 
CurrentNetwork string `json:"currentNetwork"` NetworkID *uint64 `json:"networkId"` - TestOverrideNetworks []params.Network `json:"-"` // This is used for testing purposes only + TestOverrideNetworks []params.Network `json:"networksOverride"` // This is used for testing purposes only TestNetworksEnabled bool `json:"testNetworksEnabled"` diff --git a/tests-functional/clients/status_backend.py b/tests-functional/clients/status_backend.py index 6fb7313cb24..71e9e06bba4 100644 --- a/tests-functional/clients/status_backend.py +++ b/tests-functional/clients/status_backend.py @@ -1,6 +1,8 @@ import json import logging +import time from datetime import datetime +from json import JSONDecodeError import jsonschema import requests @@ -27,7 +29,7 @@ def _check_decode_and_key_errors_in_response(self, response, key): f"Key '{key}' not found in the JSON response: {response.content}") def verify_is_valid_json_rpc_response(self, response, _id=None): - assert response.status_code == 200 + assert response.status_code == 200, f"Got response {response.content}, status code {response.status_code}" assert response.content self._check_decode_and_key_errors_in_response(response, "result") @@ -53,7 +55,10 @@ def rpc_request(self, method, params=[], request_id=13, url=None): data["params"] = params logging.info(f"Sending POST request to url {url} with data: {json.dumps(data, sort_keys=True, indent=4)}") response = self.client.post(url, json=data) - + try: + logging.info(f"Got response: {json.dumps(response.json(), sort_keys=True, indent=4)}") + except JSONDecodeError: + logging.info(f"Got response: {response.content}") return response def rpc_valid_request(self, method, params=[], _id=None, url=None): @@ -69,7 +74,7 @@ def verify_json_schema(self, response, method): class StatusBackend(RpcClient, SignalClient): - def __init__(self, await_signals): + def __init__(self, await_signals=list()): self.api_url = f"{option.rpc_url_status_backend}/statusgo" self.ws_url = f"{option.ws_url_status_backend}" @@ -81,12 +86,15 @@ def __init__(self, await_signals): def api_request(self, method, data, url=None): url = url if url else self.api_url url = f"{url}/{method}" + logging.info(f"Sending POST request to url {url} with data: {json.dumps(data, sort_keys=True, indent=4)}") response = requests.post(url, json=data) + logging.info(f"Got response: {response.content}") return response def verify_is_valid_api_response(self, response): - assert response.status_code == 200 + assert response.status_code == 200, f"Got response {response.content}, status code {response.status_code}" assert response.content + logging.info(f"Got response: {response.content}") try: assert not response.json()["error"] except json.JSONDecodeError: @@ -119,6 +127,46 @@ def create_account_and_login(self, display_name="Mr_Meeseeks", password=user_1.p } return self.api_valid_request(method, data) + def restore_account_and_login(self, display_name="Mr_Meeseeks", user=user_1): + method = "RestoreAccountAndLogin" + data = { + "rootDataDir": "/", + "displayName": display_name, + "password": user.password, + "mnemonic": user.passphrase, + "customizationColor": "blue", + "testNetworksEnabled": True, + "networkId": 31337, + "networksOverride": [ + { + "ChainID": 31337, + "ChainName": "Anvil", + "DefaultRPCURL": "http://anvil:8545", + "RPCURL": "http://anvil:8545", + "ShortName": "eth", + "NativeCurrencyName": "Ether", + "NativeCurrencySymbol": "ETH", + "NativeCurrencyDecimals": 18, + "IsTest": False, + "Layer": 1, + "Enabled": True + } + ] + } + return 
self.api_valid_request(method, data) + + def restore_account_and_wait_for_rpc_client_to_start(self, timeout=60): + self.restore_account_and_login() + start_time = time.time() + # ToDo: change this part for waiting for `node.login` signal when websockets are migrated to StatusBackend + while time.time() - start_time <= timeout: + try: + self.rpc_valid_request(method='accounts_getKeypairs') + return + except AssertionError: + time.sleep(3) + raise TimeoutError(f"RPC client was not started after {timeout} seconds") + def start_messenger(self, params=[]): method = "wakuext_startMessenger" response = self.rpc_request(method, params) diff --git a/tests-functional/conftest.py b/tests-functional/conftest.py index b6b7b1c2581..9433da149bc 100644 --- a/tests-functional/conftest.py +++ b/tests-functional/conftest.py @@ -82,6 +82,6 @@ def init_status_backend(): websocket_thread.start() backend_client.init_status_backend() - backend_client.create_account_and_login() + backend_client.restore_account_and_wait_for_rpc_client_to_start() yield backend_client diff --git a/tests-functional/constants.py b/tests-functional/constants.py index 2730d4c3800..a164e05aa2d 100644 --- a/tests-functional/constants.py +++ b/tests-functional/constants.py @@ -6,15 +6,18 @@ class Account: address: str private_key: str password: str + passphrase: str user_1 = Account( address="0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266", private_key="0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - password="Strong12345" + password="Strong12345", + passphrase="test test test test test test test test test test test junk" ) user_2 = Account( address="0x70997970c51812dc3a010c7d01b50e0d17dc79c8", private_key="0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", - password="Strong12345" + password="Strong12345", + passphrase="test test test test test test test test test test nest junk" ) diff --git a/tests-functional/pytest.ini b/tests-functional/pytest.ini index 505fab3c83b..c2a4e479b1d 100644 --- a/tests-functional/pytest.ini +++ b/tests-functional/pytest.ini @@ -12,3 +12,5 @@ markers = accounts ethclient init + transaction + create_account diff --git a/tests-functional/schemas/wallet_getEthereumChains b/tests-functional/schemas/wallet_getEthereumChains index b0e3cecc1db..4221378259c 100644 --- a/tests-functional/schemas/wallet_getEthereumChains +++ b/tests-functional/schemas/wallet_getEthereumChains @@ -74,7 +74,7 @@ "type": "object" }, "Test": { - "type": "null" + "type": ["null", "object"] } }, "required": [ diff --git a/tests-functional/tests/test_accounts.py b/tests-functional/tests/test_accounts.py index 0856690182d..5e43c27aa35 100644 --- a/tests-functional/tests/test_accounts.py +++ b/tests-functional/tests/test_accounts.py @@ -2,12 +2,12 @@ import pytest -from test_cases import StatusDTestCase +from test_cases import StatusBackendTestCase @pytest.mark.accounts @pytest.mark.rpc -class TestAccounts(StatusDTestCase): +class TestAccounts(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", diff --git a/tests-functional/tests/test_cases.py b/tests-functional/tests/test_cases.py index a6f60ac9f80..ef77c85e023 100644 --- a/tests-functional/tests/test_cases.py +++ b/tests-functional/tests/test_cases.py @@ -7,7 +7,7 @@ import pytest from clients.signals import SignalClient -from clients.status_backend import RpcClient +from clients.status_backend import RpcClient, StatusBackend from conftest import option from constants import user_1, user_2 @@ -21,14 +21,17 @@ def setup_method(self): ) 
-class WalletTestCase(StatusDTestCase): +class StatusBackendTestCase: + def setup_class(self): + self.rpc_client = StatusBackend() + self.network_id = 31337 - def setup_method(self): - super().setup_method() + +class WalletTestCase(StatusBackendTestCase): def wallet_create_multi_transaction(self, **kwargs): method = "wallet_createMultiTransaction" - transferTx_data = { + transfer_tx_data = { "data": "", "from": user_1.address, "gas": "0x5BBF", @@ -40,8 +43,8 @@ def wallet_create_multi_transaction(self, **kwargs): "value": "0x5af3107a4000", } for key, new_value in kwargs.items(): - if key in transferTx_data: - transferTx_data[key] = new_value + if key in transfer_tx_data: + transfer_tx_data[key] = new_value else: logging.info( f"Warning: The key '{key}' does not exist in the transferTx parameters and will be ignored.") @@ -58,7 +61,7 @@ def wallet_create_multi_transaction(self, **kwargs): { "bridgeName": "Transfer", "chainID": 31337, - "transferTx": transferTx_data + "transferTx": transfer_tx_data } ], f"{option.password}", @@ -81,7 +84,6 @@ def send_valid_multi_transaction(self, **kwargs): class TransactionTestCase(WalletTestCase): def setup_method(self): - super().setup_method() self.tx_hash = self.send_valid_multi_transaction() @@ -89,10 +91,6 @@ class EthRpcTestCase(WalletTestCase): @pytest.fixture(autouse=True, scope='class') def tx_data(self): - self.rpc_client = RpcClient( - option.rpc_url_statusd - ) - tx_hash = self.send_valid_multi_transaction() self.wait_until_tx_not_pending(tx_hash) @@ -103,8 +101,8 @@ def tx_data(self): except (KeyError, json.JSONDecodeError): raise Exception(receipt.content) - TxData = namedtuple("TxData", ["tx_hash", "block_number", "block_hash"]) - return TxData(tx_hash, block_number, block_hash) + tx_data = namedtuple("TxData", ["tx_hash", "block_number", "block_hash"]) + return tx_data(tx_hash, block_number, block_hash) def get_block_header(self, block_number): method = "ethclient_headerByNumber" @@ -142,9 +140,3 @@ def setup_method(self): websocket_thread = threading.Thread(target=self.signal_client._connect) websocket_thread.daemon = True websocket_thread.start() - - -class StatusBackendTestCase: - - def setup_method(self): - pass diff --git a/tests-functional/tests/test_init_status_app.py b/tests-functional/tests/test_init_status_app.py index 9afb3424cdf..f3566692671 100644 --- a/tests-functional/tests/test_init_status_app.py +++ b/tests-functional/tests/test_init_status_app.py @@ -1,11 +1,9 @@ import pytest -from test_cases import StatusBackendTestCase - @pytest.mark.create_account @pytest.mark.rpc -class TestInitialiseApp(StatusBackendTestCase): +class TestInitialiseApp: @pytest.mark.init def test_init_app(self, init_status_backend): diff --git a/tests-functional/tests/test_waku_rpc.py b/tests-functional/tests/test_waku_rpc.py index 3ddf91f0c20..e3d7ae3c68a 100644 --- a/tests-functional/tests/test_waku_rpc.py +++ b/tests-functional/tests/test_waku_rpc.py @@ -6,11 +6,10 @@ import pytest from conftest import option -from test_cases import StatusDTestCase +from test_cases import StatusBackendTestCase -@pytest.mark.skip("to be reworked via status-backend") -class TestRpc(StatusDTestCase): +class TestRpc(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", @@ -21,12 +20,12 @@ class TestRpc(StatusDTestCase): def test_(self, method, params): _id = str(random.randint(1, 8888)) - response = self.rpc_client.rpc_valid_request(method, params, _id, url=option.rpc_url_2) + response = self.rpc_client.rpc_valid_request(method, params, _id) 
self.rpc_client.verify_json_schema(response, method) @pytest.mark.skip("to be reworked via status-backend") -class TestRpcMessaging(StatusDTestCase): +class TestRpcMessaging(StatusBackendTestCase): @dataclass class User: rpc_url: str @@ -99,5 +98,5 @@ def test_add_contact(self): self.rpc_client.verify_is_valid_json_rpc_response(response) response = response.json() - assert response["result"][0]["added"] == True + assert response["result"][0]["added"] is True assert response["result"][0]["id"] == user[1].chat_public_key diff --git a/tests-functional/tests/test_wallet_rpc.py b/tests-functional/tests/test_wallet_rpc.py index 64e20041d42..04144b83a43 100644 --- a/tests-functional/tests/test_wallet_rpc.py +++ b/tests-functional/tests/test_wallet_rpc.py @@ -5,7 +5,7 @@ import pytest from conftest import option -from test_cases import StatusDTestCase, TransactionTestCase +from test_cases import StatusBackendTestCase, TransactionTestCase @pytest.mark.wallet @@ -76,7 +76,7 @@ def test_create_multi_transaction_validation(self, method, @pytest.mark.wallet @pytest.mark.rpc -class TestRpc(StatusDTestCase): +class TestRpc(StatusBackendTestCase): @pytest.mark.parametrize( "method, params", From 679391999f71654e233c8b3e7cd63abf03d554ff Mon Sep 17 00:00:00 2001 From: Igor Sirotin Date: Wed, 23 Oct 2024 21:33:05 +0100 Subject: [PATCH 4/7] feat_: `LogOnPanic` linter (#5969) * feat_: LogOnPanic linter * fix_: add missing defer LogOnPanic * chore_: make vendor * fix_: tests, address pr comments * fix_: address pr comments --- Makefile | 5 +- cmd/lint-panics/analyzer/analyzer.go | 245 +++ cmd/lint-panics/analyzer/analyzer_test.go | 28 + cmd/lint-panics/analyzer/config.go | 60 + .../analyzer/testdata/src/common/common.go | 5 + .../testdata/src/functions/anonymous.go | 24 + .../analyzer/testdata/src/functions/free.go | 29 + .../testdata/src/functions/methods.go | 33 + .../testdata/src/functions/pointers.go | 21 + cmd/lint-panics/gopls/dummy_client.go | 81 + cmd/lint-panics/gopls/gopls.go | 155 ++ cmd/lint-panics/gopls/stream.go | 29 + cmd/lint-panics/main.go | 35 + cmd/lint-panics/utils/utils.go | 39 + cmd/status-cli/util.go | 2 +- go.mod | 6 + go.sum | 13 + healthmanager/blockchain_health_manager.go | 2 + metrics/metrics.go | 3 + protocol/encryption/publisher/publisher.go | 1 + protocol/messenger.go | 4 +- .../messenger_store_node_request_manager.go | 2 + rpc/client.go | 1 + services/wallet/api.go | 4 + services/wallet/balance/ttl_cache.go | 6 +- .../wallet/common/mock/feed_subscription.go | 2 + vendor/github.com/segmentio/asm/LICENSE | 21 + .../github.com/segmentio/asm/ascii/ascii.go | 53 + .../segmentio/asm/ascii/equal_fold.go | 30 + .../segmentio/asm/ascii/equal_fold_amd64.go | 13 + .../segmentio/asm/ascii/equal_fold_amd64.s | 304 +++ .../segmentio/asm/ascii/equal_fold_default.go | 60 + .../github.com/segmentio/asm/ascii/valid.go | 18 + .../segmentio/asm/ascii/valid_amd64.go | 9 + .../segmentio/asm/ascii/valid_amd64.s | 132 ++ .../segmentio/asm/ascii/valid_default.go | 48 + .../segmentio/asm/ascii/valid_print.go | 18 + .../segmentio/asm/ascii/valid_print_amd64.go | 9 + .../segmentio/asm/ascii/valid_print_amd64.s | 185 ++ .../asm/ascii/valid_print_default.go | 46 + .../github.com/segmentio/asm/base64/base64.go | 67 + .../segmentio/asm/base64/base64_amd64.go | 160 ++ .../segmentio/asm/base64/base64_default.go | 14 + .../segmentio/asm/base64/decode_amd64.go | 10 + .../segmentio/asm/base64/decode_amd64.s | 144 ++ .../segmentio/asm/base64/encode_amd64.go | 8 + 
.../segmentio/asm/base64/encode_amd64.s | 88 + .../github.com/segmentio/asm/cpu/arm/arm.go | 80 + .../segmentio/asm/cpu/arm64/arm64.go | 74 + vendor/github.com/segmentio/asm/cpu/cpu.go | 22 + .../segmentio/asm/cpu/cpuid/cpuid.go | 32 + .../github.com/segmentio/asm/cpu/x86/x86.go | 76 + .../asm/internal/unsafebytes/unsafebytes.go | 20 + .../github.com/segmentio/asm/keyset/keyset.go | 40 + .../segmentio/asm/keyset/keyset_amd64.go | 10 + .../segmentio/asm/keyset/keyset_amd64.s | 108 + .../segmentio/asm/keyset/keyset_arm64.go | 8 + .../segmentio/asm/keyset/keyset_arm64.s | 143 ++ .../segmentio/asm/keyset/keyset_default.go | 19 + vendor/github.com/segmentio/encoding/LICENSE | 21 + .../segmentio/encoding/ascii/equal_fold.go | 40 + .../segmentio/encoding/ascii/valid.go | 26 + .../segmentio/encoding/ascii/valid_print.go | 26 + .../segmentio/encoding/iso8601/parse.go | 185 ++ .../segmentio/encoding/iso8601/valid.go | 179 ++ .../segmentio/encoding/json/README.md | 76 + .../segmentio/encoding/json/codec.go | 1232 +++++++++++ .../segmentio/encoding/json/decode.go | 1462 +++++++++++++ .../segmentio/encoding/json/encode.go | 990 +++++++++ .../github.com/segmentio/encoding/json/int.go | 98 + .../segmentio/encoding/json/json.go | 582 +++++ .../segmentio/encoding/json/parse.go | 787 +++++++ .../segmentio/encoding/json/reflect.go | 20 + .../encoding/json/reflect_optimize.go | 30 + .../segmentio/encoding/json/string.go | 70 + .../segmentio/encoding/json/token.go | 416 ++++ vendor/go.lsp.dev/jsonrpc2/.codecov.yml | 38 + vendor/go.lsp.dev/jsonrpc2/.errcheckignore | 1 + vendor/go.lsp.dev/jsonrpc2/.gitattributes | 11 + vendor/go.lsp.dev/jsonrpc2/.gitignore | 52 + vendor/go.lsp.dev/jsonrpc2/.golangci.yml | 200 ++ vendor/go.lsp.dev/jsonrpc2/LICENSE | 29 + vendor/go.lsp.dev/jsonrpc2/Makefile | 129 ++ vendor/go.lsp.dev/jsonrpc2/README.md | 19 + vendor/go.lsp.dev/jsonrpc2/codes.go | 86 + vendor/go.lsp.dev/jsonrpc2/conn.go | 245 +++ vendor/go.lsp.dev/jsonrpc2/errors.go | 70 + vendor/go.lsp.dev/jsonrpc2/handler.go | 120 ++ vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go | 7 + vendor/go.lsp.dev/jsonrpc2/message.go | 358 ++++ vendor/go.lsp.dev/jsonrpc2/serve.go | 129 ++ vendor/go.lsp.dev/jsonrpc2/stream.go | 226 ++ vendor/go.lsp.dev/jsonrpc2/wire.go | 140 ++ vendor/go.lsp.dev/pkg/LICENSE | 29 + vendor/go.lsp.dev/pkg/xcontext/xcontext.go | 22 + vendor/go.lsp.dev/protocol/.codecov.yml | 45 + vendor/go.lsp.dev/protocol/.gitattributes | 11 + vendor/go.lsp.dev/protocol/.gitignore | 52 + vendor/go.lsp.dev/protocol/.golangci.yml | 242 +++ vendor/go.lsp.dev/protocol/LICENSE | 29 + vendor/go.lsp.dev/protocol/Makefile | 126 ++ vendor/go.lsp.dev/protocol/README.md | 19 + vendor/go.lsp.dev/protocol/base.go | 96 + vendor/go.lsp.dev/protocol/basic.go | 705 ++++++ vendor/go.lsp.dev/protocol/callhierarchy.go | 103 + .../protocol/capabilities_client.go | 1061 +++++++++ .../protocol/capabilities_server.go | 523 +++++ vendor/go.lsp.dev/protocol/client.go | 412 ++++ vendor/go.lsp.dev/protocol/context.go | 35 + vendor/go.lsp.dev/protocol/deprecated.go | 264 +++ vendor/go.lsp.dev/protocol/diagnostics.go | 149 ++ vendor/go.lsp.dev/protocol/doc.go | 23 + vendor/go.lsp.dev/protocol/errors.go | 40 + vendor/go.lsp.dev/protocol/general.go | 461 ++++ vendor/go.lsp.dev/protocol/handler.go | 88 + vendor/go.lsp.dev/protocol/language.go | 1316 ++++++++++++ vendor/go.lsp.dev/protocol/log.go | 156 ++ vendor/go.lsp.dev/protocol/progress.go | 119 ++ vendor/go.lsp.dev/protocol/protocol.go | 42 + 
vendor/go.lsp.dev/protocol/registration.go | 44 + vendor/go.lsp.dev/protocol/selectionrange.go | 110 + vendor/go.lsp.dev/protocol/semantic_token.go | 179 ++ vendor/go.lsp.dev/protocol/server.go | 1892 +++++++++++++++++ vendor/go.lsp.dev/protocol/text.go | 111 + vendor/go.lsp.dev/protocol/util.go | 9 + vendor/go.lsp.dev/protocol/version.go | 7 + vendor/go.lsp.dev/protocol/window.go | 111 + vendor/go.lsp.dev/protocol/workspace.go | 213 ++ vendor/go.lsp.dev/uri/.codecov.yml | 24 + vendor/go.lsp.dev/uri/.gitattributes | 11 + vendor/go.lsp.dev/uri/.gitignore | 49 + vendor/go.lsp.dev/uri/.golangci.yml | 151 ++ vendor/go.lsp.dev/uri/LICENSE | 29 + vendor/go.lsp.dev/uri/Makefile | 15 + vendor/go.lsp.dev/uri/README.md | 19 + vendor/go.lsp.dev/uri/doc.go | 6 + vendor/go.lsp.dev/uri/uri.go | 192 ++ .../x/tools/go/analysis/analysis.go | 249 +++ .../go/analysis/analysistest/analysistest.go | 695 ++++++ .../x/tools/go/analysis/diagnostic.go | 85 + vendor/golang.org/x/tools/go/analysis/doc.go | 317 +++ .../analysis/internal/analysisflags/flags.go | 447 ++++ .../analysis/internal/analysisflags/help.go | 96 + .../go/analysis/internal/analysisflags/url.go | 33 + .../go/analysis/internal/checker/checker.go | 984 +++++++++ .../go/analysis/passes/inspect/inspect.go | 49 + .../analysis/singlechecker/singlechecker.go | 76 + .../go/analysis/unitchecker/unitchecker.go | 452 ++++ .../x/tools/go/analysis/validate.go | 137 ++ .../internal/analysisinternal/analysis.go | 513 +++++ .../internal/analysisinternal/extractdoc.go | 113 + .../golang.org/x/tools/internal/diff/diff.go | 176 ++ .../x/tools/internal/diff/lcs/common.go | 179 ++ .../x/tools/internal/diff/lcs/doc.go | 156 ++ .../x/tools/internal/diff/lcs/git.sh | 33 + .../x/tools/internal/diff/lcs/labels.go | 55 + .../x/tools/internal/diff/lcs/old.go | 480 +++++ .../x/tools/internal/diff/lcs/sequence.go | 113 + .../golang.org/x/tools/internal/diff/ndiff.go | 99 + .../x/tools/internal/diff/unified.go | 251 +++ .../x/tools/internal/facts/facts.go | 389 ++++ .../x/tools/internal/facts/imports.go | 136 ++ .../x/tools/internal/goroot/importcfg.go | 71 + .../tools/internal/robustio/gopls_windows.go | 16 + .../x/tools/internal/robustio/robustio.go | 69 + .../internal/robustio/robustio_darwin.go | 21 + .../tools/internal/robustio/robustio_flaky.go | 92 + .../tools/internal/robustio/robustio_other.go | 28 + .../tools/internal/robustio/robustio_plan9.go | 26 + .../tools/internal/robustio/robustio_posix.go | 26 + .../internal/robustio/robustio_windows.go | 51 + .../x/tools/internal/testenv/exec.go | 192 ++ .../x/tools/internal/testenv/testenv.go | 492 +++++ .../tools/internal/testenv/testenv_notunix.go | 14 + .../x/tools/internal/testenv/testenv_unix.go | 14 + vendor/golang.org/x/tools/txtar/archive.go | 140 ++ vendor/golang.org/x/tools/txtar/fs.go | 259 +++ vendor/modules.txt | 43 + waku/v1/peer.go | 2 + wakuv2/waku.go | 15 +- 180 files changed, 28220 insertions(+), 8 deletions(-) create mode 100644 cmd/lint-panics/analyzer/analyzer.go create mode 100644 cmd/lint-panics/analyzer/analyzer_test.go create mode 100644 cmd/lint-panics/analyzer/config.go create mode 100644 cmd/lint-panics/analyzer/testdata/src/common/common.go create mode 100644 cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go create mode 100644 cmd/lint-panics/analyzer/testdata/src/functions/free.go create mode 100644 cmd/lint-panics/analyzer/testdata/src/functions/methods.go create mode 100644 cmd/lint-panics/analyzer/testdata/src/functions/pointers.go create mode 100644 
cmd/lint-panics/gopls/dummy_client.go create mode 100644 cmd/lint-panics/gopls/gopls.go create mode 100644 cmd/lint-panics/gopls/stream.go create mode 100644 cmd/lint-panics/main.go create mode 100644 cmd/lint-panics/utils/utils.go create mode 100644 vendor/github.com/segmentio/asm/LICENSE create mode 100644 vendor/github.com/segmentio/asm/ascii/ascii.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/equal_fold_default.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_default.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s create mode 100644 vendor/github.com/segmentio/asm/ascii/valid_print_default.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/base64_default.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/decode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.go create mode 100644 vendor/github.com/segmentio/asm/base64/encode_amd64.s create mode 100644 vendor/github.com/segmentio/asm/cpu/arm/arm.go create mode 100644 vendor/github.com/segmentio/asm/cpu/arm64/arm64.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpu.go create mode 100644 vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go create mode 100644 vendor/github.com/segmentio/asm/cpu/x86/x86.go create mode 100644 vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_amd64.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_amd64.s create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_arm64.go create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_arm64.s create mode 100644 vendor/github.com/segmentio/asm/keyset/keyset_default.go create mode 100644 vendor/github.com/segmentio/encoding/LICENSE create mode 100644 vendor/github.com/segmentio/encoding/ascii/equal_fold.go create mode 100644 vendor/github.com/segmentio/encoding/ascii/valid.go create mode 100644 vendor/github.com/segmentio/encoding/ascii/valid_print.go create mode 100644 vendor/github.com/segmentio/encoding/iso8601/parse.go create mode 100644 vendor/github.com/segmentio/encoding/iso8601/valid.go create mode 100644 vendor/github.com/segmentio/encoding/json/README.md create mode 100644 vendor/github.com/segmentio/encoding/json/codec.go create mode 100644 
vendor/github.com/segmentio/encoding/json/decode.go create mode 100644 vendor/github.com/segmentio/encoding/json/encode.go create mode 100644 vendor/github.com/segmentio/encoding/json/int.go create mode 100644 vendor/github.com/segmentio/encoding/json/json.go create mode 100644 vendor/github.com/segmentio/encoding/json/parse.go create mode 100644 vendor/github.com/segmentio/encoding/json/reflect.go create mode 100644 vendor/github.com/segmentio/encoding/json/reflect_optimize.go create mode 100644 vendor/github.com/segmentio/encoding/json/string.go create mode 100644 vendor/github.com/segmentio/encoding/json/token.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/.codecov.yml create mode 100644 vendor/go.lsp.dev/jsonrpc2/.errcheckignore create mode 100644 vendor/go.lsp.dev/jsonrpc2/.gitattributes create mode 100644 vendor/go.lsp.dev/jsonrpc2/.gitignore create mode 100644 vendor/go.lsp.dev/jsonrpc2/.golangci.yml create mode 100644 vendor/go.lsp.dev/jsonrpc2/LICENSE create mode 100644 vendor/go.lsp.dev/jsonrpc2/Makefile create mode 100644 vendor/go.lsp.dev/jsonrpc2/README.md create mode 100644 vendor/go.lsp.dev/jsonrpc2/codes.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/conn.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/errors.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/handler.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/message.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/serve.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/stream.go create mode 100644 vendor/go.lsp.dev/jsonrpc2/wire.go create mode 100644 vendor/go.lsp.dev/pkg/LICENSE create mode 100644 vendor/go.lsp.dev/pkg/xcontext/xcontext.go create mode 100644 vendor/go.lsp.dev/protocol/.codecov.yml create mode 100644 vendor/go.lsp.dev/protocol/.gitattributes create mode 100644 vendor/go.lsp.dev/protocol/.gitignore create mode 100644 vendor/go.lsp.dev/protocol/.golangci.yml create mode 100644 vendor/go.lsp.dev/protocol/LICENSE create mode 100644 vendor/go.lsp.dev/protocol/Makefile create mode 100644 vendor/go.lsp.dev/protocol/README.md create mode 100644 vendor/go.lsp.dev/protocol/base.go create mode 100644 vendor/go.lsp.dev/protocol/basic.go create mode 100644 vendor/go.lsp.dev/protocol/callhierarchy.go create mode 100644 vendor/go.lsp.dev/protocol/capabilities_client.go create mode 100644 vendor/go.lsp.dev/protocol/capabilities_server.go create mode 100644 vendor/go.lsp.dev/protocol/client.go create mode 100644 vendor/go.lsp.dev/protocol/context.go create mode 100644 vendor/go.lsp.dev/protocol/deprecated.go create mode 100644 vendor/go.lsp.dev/protocol/diagnostics.go create mode 100644 vendor/go.lsp.dev/protocol/doc.go create mode 100644 vendor/go.lsp.dev/protocol/errors.go create mode 100644 vendor/go.lsp.dev/protocol/general.go create mode 100644 vendor/go.lsp.dev/protocol/handler.go create mode 100644 vendor/go.lsp.dev/protocol/language.go create mode 100644 vendor/go.lsp.dev/protocol/log.go create mode 100644 vendor/go.lsp.dev/protocol/progress.go create mode 100644 vendor/go.lsp.dev/protocol/protocol.go create mode 100644 vendor/go.lsp.dev/protocol/registration.go create mode 100644 vendor/go.lsp.dev/protocol/selectionrange.go create mode 100644 vendor/go.lsp.dev/protocol/semantic_token.go create mode 100644 vendor/go.lsp.dev/protocol/server.go create mode 100644 vendor/go.lsp.dev/protocol/text.go create mode 100644 vendor/go.lsp.dev/protocol/util.go create mode 100644 
vendor/go.lsp.dev/protocol/version.go create mode 100644 vendor/go.lsp.dev/protocol/window.go create mode 100644 vendor/go.lsp.dev/protocol/workspace.go create mode 100644 vendor/go.lsp.dev/uri/.codecov.yml create mode 100644 vendor/go.lsp.dev/uri/.gitattributes create mode 100644 vendor/go.lsp.dev/uri/.gitignore create mode 100644 vendor/go.lsp.dev/uri/.golangci.yml create mode 100644 vendor/go.lsp.dev/uri/LICENSE create mode 100644 vendor/go.lsp.dev/uri/Makefile create mode 100644 vendor/go.lsp.dev/uri/README.md create mode 100644 vendor/go.lsp.dev/uri/doc.go create mode 100644 vendor/go.lsp.dev/uri/uri.go create mode 100644 vendor/golang.org/x/tools/go/analysis/analysis.go create mode 100644 vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go create mode 100644 vendor/golang.org/x/tools/go/analysis/diagnostic.go create mode 100644 vendor/golang.org/x/tools/go/analysis/doc.go create mode 100644 vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go create mode 100644 vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go create mode 100644 vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go create mode 100644 vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go create mode 100644 vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go create mode 100644 vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go create mode 100644 vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go create mode 100644 vendor/golang.org/x/tools/go/analysis/validate.go create mode 100644 vendor/golang.org/x/tools/internal/analysisinternal/analysis.go create mode 100644 vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go create mode 100644 vendor/golang.org/x/tools/internal/diff/diff.go create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/common.go create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/doc.go create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/git.sh create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/labels.go create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/old.go create mode 100644 vendor/golang.org/x/tools/internal/diff/lcs/sequence.go create mode 100644 vendor/golang.org/x/tools/internal/diff/ndiff.go create mode 100644 vendor/golang.org/x/tools/internal/diff/unified.go create mode 100644 vendor/golang.org/x/tools/internal/facts/facts.go create mode 100644 vendor/golang.org/x/tools/internal/facts/imports.go create mode 100644 vendor/golang.org/x/tools/internal/goroot/importcfg.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/gopls_windows.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_other.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_posix.go create mode 100644 vendor/golang.org/x/tools/internal/robustio/robustio_windows.go create mode 100644 vendor/golang.org/x/tools/internal/testenv/exec.go create mode 100644 vendor/golang.org/x/tools/internal/testenv/testenv.go create mode 100644 vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go create mode 100644 vendor/golang.org/x/tools/internal/testenv/testenv_unix.go create mode 
100644 vendor/golang.org/x/tools/txtar/archive.go create mode 100644 vendor/golang.org/x/tools/txtar/fs.go diff --git a/Makefile b/Makefile index 3804269da33..0d0916ee48e 100644 --- a/Makefile +++ b/Makefile @@ -408,7 +408,10 @@ canary-test: node-canary # TODO: uncomment that! #_assets/scripts/canary_test_mailservers.sh ./config/cli/fleet-eth.prod.json -lint: generate +lint-panics: generate + go run ./cmd/lint-panics -root="$(call sh, pwd)" -skip=./cmd -test=false ./... + +lint: generate lint-panics golangci-lint run ./... ci: generate lint canary-test test-unit test-e2e ##@tests Run all linters and tests at once diff --git a/cmd/lint-panics/analyzer/analyzer.go b/cmd/lint-panics/analyzer/analyzer.go new file mode 100644 index 00000000000..d7e9df69316 --- /dev/null +++ b/cmd/lint-panics/analyzer/analyzer.go @@ -0,0 +1,245 @@ +package analyzer + +import ( + "context" + "fmt" + "go/ast" + "os" + + "go.uber.org/zap" + + goparser "go/parser" + gotoken "go/token" + "strings" + + "github.com/pkg/errors" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/inspector" + + "github.com/status-im/status-go/cmd/lint-panics/gopls" + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +const Pattern = "LogOnPanic" + +type Analyzer struct { + logger *zap.Logger + lsp LSP + cfg *Config +} + +type LSP interface { + Definition(context.Context, string, int, int) (string, int, error) +} + +func New(ctx context.Context, logger *zap.Logger) (*analysis.Analyzer, error) { + cfg := Config{} + flags, err := cfg.ParseFlags() + if err != nil { + return nil, err + } + + logger.Info("creating analyzer", zap.String("root", cfg.RootDir)) + + goplsClient := gopls.NewGoplsClient(ctx, logger, cfg.RootDir) + processor := newAnalyzer(logger, goplsClient, &cfg) + + analyzer := &analysis.Analyzer{ + Name: "logpanics", + Doc: fmt.Sprintf("reports missing defer call to %s", Pattern), + Flags: flags, + Requires: []*analysis.Analyzer{inspect.Analyzer}, + Run: func(pass *analysis.Pass) (interface{}, error) { + return processor.Run(ctx, pass) + }, + } + + return analyzer, nil +} + +func newAnalyzer(logger *zap.Logger, lsp LSP, cfg *Config) *Analyzer { + return &Analyzer{ + logger: logger.Named("processor"), + lsp: lsp, + cfg: cfg.WithAbsolutePaths(), + } +} + +func (p *Analyzer) Run(ctx context.Context, pass *analysis.Pass) (interface{}, error) { + inspected, ok := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) + if !ok { + return nil, errors.New("analyzer is not type *inspector.Inspector") + } + + // Create a nodes filter for goroutines (GoStmt represents a 'go' statement) + nodeFilter := []ast.Node{ + (*ast.GoStmt)(nil), + } + + // Inspect go statements + inspected.Preorder(nodeFilter, func(n ast.Node) { + p.ProcessNode(ctx, pass, n) + }) + + return nil, nil +} + +func (p *Analyzer) ProcessNode(ctx context.Context, pass *analysis.Pass, n ast.Node) { + goStmt, ok := n.(*ast.GoStmt) + if !ok { + panic("unexpected node type") + } + + switch fun := goStmt.Call.Fun.(type) { + case *ast.FuncLit: // anonymous function + pos := pass.Fset.Position(fun.Pos()) + logger := p.logger.With( + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + logger.Debug("found anonymous goroutine") + if err := p.checkGoroutine(fun.Body); err != nil { + p.logLinterError(pass, fun.Pos(), fun.Pos(), err) + } + + case *ast.SelectorExpr: // method call + pos := pass.Fset.Position(fun.Sel.Pos()) + p.logger.Info("found method call as 
goroutine", + zap.String("methodName", fun.Sel.Name), + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + defPos, err := p.checkGoroutineDefinition(ctx, pos, pass) + if err != nil { + p.logLinterError(pass, defPos, fun.Sel.Pos(), err) + } + + case *ast.Ident: // function call + pos := pass.Fset.Position(fun.Pos()) + p.logger.Info("found function call as goroutine", + zap.String("functionName", fun.Name), + utils.ZapURI(pos.Filename, pos.Line), + zap.Int("column", pos.Column), + ) + + defPos, err := p.checkGoroutineDefinition(ctx, pos, pass) + if err != nil { + p.logLinterError(pass, defPos, fun.Pos(), err) + } + + default: + p.logger.Error("unexpected goroutine type", + zap.String("type", fmt.Sprintf("%T", fun)), + ) + } +} + +func (p *Analyzer) parseFile(path string, pass *analysis.Pass) (*ast.File, error) { + logger := p.logger.With(zap.String("path", path)) + + src, err := os.ReadFile(path) + if err != nil { + logger.Error("failed to open file", zap.Error(err)) + } + + file, err := goparser.ParseFile(pass.Fset, path, src, 0) + if err != nil { + logger.Error("failed to parse file", zap.Error(err)) + return nil, err + } + + return file, nil +} + +func (p *Analyzer) checkGoroutine(body *ast.BlockStmt) error { + if body == nil { + p.logger.Warn("missing function body") + return nil + } + if len(body.List) == 0 { + // empty goroutine is weird, but it never panics, so not a linter error + return nil + } + + deferStatement, ok := body.List[0].(*ast.DeferStmt) + if !ok { + return errors.New("first statement is not defer") + } + + selectorExpr, ok := deferStatement.Call.Fun.(*ast.SelectorExpr) + if !ok { + return errors.New("first statement call is not a selector") + } + + firstLineFunName := selectorExpr.Sel.Name + if firstLineFunName != Pattern { + return errors.Errorf("first statement is not %s", Pattern) + } + + return nil +} + +func (p *Analyzer) getFunctionBody(node ast.Node, lineNumber int, pass *analysis.Pass) (body *ast.BlockStmt, pos gotoken.Pos) { + ast.Inspect(node, func(n ast.Node) bool { + // Check if the node is a function declaration + funcDecl, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + + if pass.Fset.Position(n.Pos()).Line != lineNumber { + return true + } + + body = funcDecl.Body + pos = n.Pos() + return false + }) + + return body, pos + +} + +func (p *Analyzer) checkGoroutineDefinition(ctx context.Context, pos gotoken.Position, pass *analysis.Pass) (gotoken.Pos, error) { + defFilePath, defLineNumber, err := p.lsp.Definition(ctx, pos.Filename, pos.Line, pos.Column) + if err != nil { + p.logger.Error("failed to find function definition", zap.Error(err)) + return 0, err + } + + file, err := p.parseFile(defFilePath, pass) + if err != nil { + p.logger.Error("failed to parse file", zap.Error(err)) + return 0, err + } + + body, defPosition := p.getFunctionBody(file, defLineNumber, pass) + return defPosition, p.checkGoroutine(body) +} + +func (p *Analyzer) logLinterError(pass *analysis.Pass, errPos gotoken.Pos, callPos gotoken.Pos, err error) { + errPosition := pass.Fset.Position(errPos) + callPosition := pass.Fset.Position(callPos) + + if p.skip(errPosition.Filename) || p.skip(callPosition.Filename) { + return + } + + message := fmt.Sprintf("missing %s()", Pattern) + p.logger.Warn(message, + utils.ZapURI(errPosition.Filename, errPosition.Line), + zap.String("details", err.Error())) + + if callPos == errPos { + pass.Reportf(errPos, "missing defer call to %s", Pattern) + } else { + pass.Reportf(callPos, "missing defer call to %s", Pattern) + } 
+} + +func (p *Analyzer) skip(filepath string) bool { + return p.cfg.SkipDir != "" && strings.HasPrefix(filepath, p.cfg.SkipDir) +} diff --git a/cmd/lint-panics/analyzer/analyzer_test.go b/cmd/lint-panics/analyzer/analyzer_test.go new file mode 100644 index 00000000000..9d856dff4ff --- /dev/null +++ b/cmd/lint-panics/analyzer/analyzer_test.go @@ -0,0 +1,28 @@ +package analyzer + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "golang.org/x/tools/go/analysis/analysistest" + + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +func TestMethods(t *testing.T) { + t.Parallel() + + logger := utils.BuildLogger(zap.DebugLevel) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + defer cancel() + + a, err := New(ctx, logger) + require.NoError(t, err) + + analysistest.Run(t, analysistest.TestData(), a, "functions") +} diff --git a/cmd/lint-panics/analyzer/config.go b/cmd/lint-panics/analyzer/config.go new file mode 100644 index 00000000000..79452a01ee6 --- /dev/null +++ b/cmd/lint-panics/analyzer/config.go @@ -0,0 +1,60 @@ +package analyzer + +import ( + "flag" + "io" + "os" + "path" + "strings" +) + +type Config struct { + RootDir string + SkipDir string +} + +var workdir string + +func init() { + var err error + workdir, err = os.Getwd() + if err != nil { + panic(err) + } +} + +func (c *Config) ParseFlags() (flag.FlagSet, error) { + flags := flag.NewFlagSet("lint-panics", flag.ContinueOnError) + flags.SetOutput(io.Discard) // Otherwise errors are printed to stderr + flags.StringVar(&c.RootDir, "root", workdir, "root directory to run gopls") + flags.StringVar(&c.SkipDir, "skip", "", "skip paths with this suffix") + + // We parse the flags here to have `rootDir` before the call to `singlechecker.Main(analyzer)` + // For same reasons we discard the output and skip the undefined flag error. 
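+ // Unknown flags are expected at this early parse: singlechecker registers its
+ // own flags and performs the authoritative parse later, so "flag provided but
+ // not defined" and "help requested" are tolerated below rather than treated as fatal.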
+ err := flags.Parse(os.Args[1:]) + if err == nil { + return *flags, nil + } + + if strings.Contains(err.Error(), "flag provided but not defined") { + err = nil + } else if strings.Contains(err.Error(), "help requested") { + err = nil + } + + return *flags, err +} + +func (c *Config) WithAbsolutePaths() *Config { + out := *c + + if !path.IsAbs(out.RootDir) { + out.RootDir = path.Join(workdir, out.RootDir) + } + + if out.SkipDir != "" && !path.IsAbs(out.SkipDir) { + out.SkipDir = path.Join(out.RootDir, out.SkipDir) + } + + return &out +} diff --git a/cmd/lint-panics/analyzer/testdata/src/common/common.go b/cmd/lint-panics/analyzer/testdata/src/common/common.go new file mode 100644 index 00000000000..41fd4949ef2 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/common/common.go @@ -0,0 +1,5 @@ +package common + +func LogOnPanic() { + // do nothing +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go b/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go new file mode 100644 index 00000000000..e4249b1eb62 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/anonymous.go @@ -0,0 +1,24 @@ +package functions + +import ( + "common" + "fmt" +) + +func init() { + go func() { + defer common.LogOnPanic() + }() + + go func() { + + }() + + go func() { // want "missing defer call to LogOnPanic" + fmt.Println("anon") + }() + + go func() { // want "missing defer call to LogOnPanic" + common.LogOnPanic() + }() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/free.go b/cmd/lint-panics/analyzer/testdata/src/functions/free.go new file mode 100644 index 00000000000..26a12c3ca38 --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/free.go @@ -0,0 +1,29 @@ +package functions + +import ( + "common" + "fmt" +) + +func init() { + go ok() + go empty() + go noLogOnPanic() // want "missing defer call to LogOnPanic" + go notDefer() // want "missing defer call to LogOnPanic" +} + +func ok() { + defer common.LogOnPanic() +} + +func empty() { + +} + +func noLogOnPanic() { + defer fmt.Println("Bar") +} + +func notDefer() { + common.LogOnPanic() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/methods.go b/cmd/lint-panics/analyzer/testdata/src/functions/methods.go new file mode 100644 index 00000000000..13e5383231e --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/methods.go @@ -0,0 +1,33 @@ +package functions + +import ( + "common" + "fmt" +) + +type Test struct { +} + +func init() { + t := Test{} + go t.ok() + go t.empty() + go t.noLogOnPanic() // want "missing defer call to LogOnPanic" + go t.notDefer() // want "missing defer call to LogOnPanic" +} + +func (p *Test) ok() { + defer common.LogOnPanic() +} + +func (p *Test) empty() { + +} + +func (p *Test) noLogOnPanic() { + defer fmt.Println("FooNoLogOnPanic") +} + +func (p *Test) notDefer() { + common.LogOnPanic() +} diff --git a/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go b/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go new file mode 100644 index 00000000000..d2985395d7d --- /dev/null +++ b/cmd/lint-panics/analyzer/testdata/src/functions/pointers.go @@ -0,0 +1,21 @@ +package functions + +import ( + "common" +) + +func init() { + runAsync(ok) + runAsyncOk(ok) +} + +func runAsync(fn func()) { + go fn() // want "missing defer call to LogOnPanic" +} + +func runAsyncOk(fn func()) { + go func() { + defer common.LogOnPanic() + fn() + }() +} diff --git a/cmd/lint-panics/gopls/dummy_client.go b/cmd/lint-panics/gopls/dummy_client.go new 
file mode 100644 index 00000000000..1ce026ca453 --- /dev/null +++ b/cmd/lint-panics/gopls/dummy_client.go @@ -0,0 +1,81 @@ +package gopls + +import ( + "context" + + "go.lsp.dev/protocol" + + "go.uber.org/zap" +) + +type DummyClient struct { + logger *zap.Logger +} + +func NewDummyClient(logger *zap.Logger) *DummyClient { + if logger == nil { + logger = zap.NewNop() + } + return &DummyClient{ + logger: logger, + } +} + +func (d *DummyClient) Progress(ctx context.Context, params *protocol.ProgressParams) (err error) { + d.logger.Debug("client: Progress", zap.Any("params", params)) + return +} +func (d *DummyClient) WorkDoneProgressCreate(ctx context.Context, params *protocol.WorkDoneProgressCreateParams) (err error) { + d.logger.Debug("client: WorkDoneProgressCreate") + return nil +} + +func (d *DummyClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) (err error) { + d.logger.Debug("client: LogMessage", zap.Any("message", params)) + return nil +} + +func (d *DummyClient) PublishDiagnostics(ctx context.Context, params *protocol.PublishDiagnosticsParams) (err error) { + d.logger.Debug("client: PublishDiagnostics") + return nil +} + +func (d *DummyClient) ShowMessage(ctx context.Context, params *protocol.ShowMessageParams) (err error) { + d.logger.Debug("client: ShowMessage", zap.Any("message", params)) + return nil +} + +func (d *DummyClient) ShowMessageRequest(ctx context.Context, params *protocol.ShowMessageRequestParams) (result *protocol.MessageActionItem, err error) { + d.logger.Debug("client: ShowMessageRequest", zap.Any("message", params)) + return nil, nil +} + +func (d *DummyClient) Telemetry(ctx context.Context, params interface{}) (err error) { + d.logger.Debug("client: Telemetry") + return nil +} + +func (d *DummyClient) RegisterCapability(ctx context.Context, params *protocol.RegistrationParams) (err error) { + d.logger.Debug("client: RegisterCapability") + return nil +} + +func (d *DummyClient) UnregisterCapability(ctx context.Context, params *protocol.UnregistrationParams) (err error) { + d.logger.Debug("client: UnregisterCapability") + return nil +} + +func (d *DummyClient) ApplyEdit(ctx context.Context, params *protocol.ApplyWorkspaceEditParams) (result bool, err error) { + d.logger.Debug("client: ApplyEdit") + return false, nil +} + +func (d *DummyClient) Configuration(ctx context.Context, params *protocol.ConfigurationParams) (result []interface{}, err error) { + d.logger.Debug("client: Configuration") + return nil, nil +} + +func (d *DummyClient) WorkspaceFolders(ctx context.Context) (result []protocol.WorkspaceFolder, err error) { + d.logger.Debug("client: WorkspaceFolders") + return nil, nil +} diff --git a/cmd/lint-panics/gopls/gopls.go b/cmd/lint-panics/gopls/gopls.go new file mode 100644 index 00000000000..ac8e4750d25 --- /dev/null +++ b/cmd/lint-panics/gopls/gopls.go @@ -0,0 +1,155 @@ +package gopls + +import ( + "os/exec" + + "github.com/pkg/errors" + + "context" + + "go.lsp.dev/jsonrpc2" + "go.lsp.dev/protocol" + + "time" + + "go.lsp.dev/uri" + "go.uber.org/zap" +) + +type Connection struct { + logger *zap.Logger + server protocol.Server + cmd *exec.Cmd + conn jsonrpc2.Conn +} + +func NewGoplsClient(ctx context.Context, logger *zap.Logger, rootDir string) *Connection { + var err error + + logger.Debug("initializing gopls client") + + gopls := &Connection{ + logger: logger, + } + + client := NewDummyClient(logger) + + // Step 1: Create a JSON-RPC connection using stdin and stdout + gopls.cmd = exec.Command("gopls", "serve") + + stdin, 
err := gopls.cmd.StdinPipe() + if err != nil { + logger.Error("Failed to get stdin pipe", zap.Error(err)) + panic(err) + } + + stdout, err := gopls.cmd.StdoutPipe() + if err != nil { + logger.Error("Failed to get stdout pipe", zap.Error(err)) + panic(err) + } + + err = gopls.cmd.Start() + if err != nil { + logger.Error("Failed to start gopls", zap.Error(err)) + panic(err) + } + + stream := jsonrpc2.NewStream(&IOStream{ + stdin: stdin, + stdout: stdout, + }) + + // Step 2: Create a client for the running gopls server + ctx, gopls.conn, gopls.server = protocol.NewClient(ctx, client, stream, logger) + + // Step 3: Initialize the gopls server + initParams := protocol.InitializeParams{ + RootURI: uri.From("file", "", rootDir, "", ""), + InitializationOptions: map[string]interface{}{ + "symbolMatcher": "FastFuzzy", + }, + } + + _, err = gopls.server.Initialize(ctx, &initParams) + if err != nil { + logger.Error("Error during initialize", zap.Error(err)) + panic(err) + } + + // Step 4: Send 'initialized' notification + err = gopls.server.Initialized(ctx, &protocol.InitializedParams{}) + if err != nil { + logger.Error("Error during initialized", zap.Error(err)) + panic(err) + } + + return gopls +} + +func (gopls *Connection) Definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) { + // NOTE: gopls uses 0-based line and column numbers + defFile, defLine, err := gopls.definition(ctx, filePath, lineNumber-1, charPosition-1) + return defFile, defLine + 1, err +} + +func (gopls *Connection) definition(ctx context.Context, filePath string, lineNumber int, charPosition int) (string, int, error) { + // Define the file URI and position where the function/method is invoked + fileURI := protocol.DocumentURI("file://" + filePath) + line := lineNumber // Line number where the function is called + character := charPosition // Character (column) where the function is called + + // Send the definition request + params := &protocol.DefinitionParams{ + TextDocumentPositionParams: protocol.TextDocumentPositionParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: fileURI, + }, + Position: protocol.Position{ + Line: uint32(line), + Character: uint32(character), + }, + }, + } + + // Create context with a timeout to avoid hanging + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + locations, err := gopls.server.Definition(ctx, params) + if err != nil { + return "", 0, errors.Wrap(err, "failed to fetch definition") + } + + if len(locations) == 0 { + return "", 0, errors.New("no definition found") + } + + location := locations[0] + return location.URI.Filename(), int(location.Range.Start.Line), nil +} + +func (gopls *Connection) DidOpen(ctx context.Context, path string, content string, logger *zap.Logger) { + err := gopls.server.DidOpen(ctx, &protocol.DidOpenTextDocumentParams{ + TextDocument: protocol.TextDocumentItem{ + URI: protocol.DocumentURI(path), + LanguageID: "go", + Version: 1, + Text: content, + }, + }) + if err != nil { + logger.Error("failed to call DidOpen", zap.Error(err)) + } +} + +func (gopls *Connection) DidClose(ctx context.Context, path string, logger *zap.Logger) { + err := gopls.server.DidClose(ctx, &protocol.DidCloseTextDocumentParams{ + TextDocument: protocol.TextDocumentIdentifier{ + URI: protocol.DocumentURI(path), + }, + }) + if err != nil { + logger.Error("failed to call DidClose", zap.Error(err)) + } +} diff --git a/cmd/lint-panics/gopls/stream.go
b/cmd/lint-panics/gopls/stream.go new file mode 100644 index 00000000000..4792d3f26b7 --- /dev/null +++ b/cmd/lint-panics/gopls/stream.go @@ -0,0 +1,29 @@ +package gopls + +import "io" + +// IOStream combines stdin and stdout into one interface. +type IOStream struct { + stdin io.WriteCloser + stdout io.ReadCloser +} + +// Write writes data to stdin. +func (c *IOStream) Write(p []byte) (n int, err error) { + return c.stdin.Write(p) +} + +// Read reads data from stdout. +func (c *IOStream) Read(p []byte) (n int, err error) { + return c.stdout.Read(p) +} + +// Close closes both stdin and stdout. +func (c *IOStream) Close() error { + err1 := c.stdin.Close() + err2 := c.stdout.Close() + if err1 != nil { + return err1 + } + return err2 +} diff --git a/cmd/lint-panics/main.go b/cmd/lint-panics/main.go new file mode 100644 index 00000000000..d046f0d8954 --- /dev/null +++ b/cmd/lint-panics/main.go @@ -0,0 +1,35 @@ +package main + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + "golang.org/x/tools/go/analysis/singlechecker" + + "github.com/status-im/status-go/cmd/lint-panics/analyzer" + "github.com/status-im/status-go/cmd/lint-panics/utils" +) + +/* + Run with `-root=` to specify the root directory to run gopls. Defaults to the current working directory. + Set `-skip=` to skip errors in certain directories. If relative, it is relative to the root directory. + + If provided, `-root` and `-skip` arguments MUST go first, before any other args. +*/ + +func main() { + logger := utils.BuildLogger(zap.ErrorLevel) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + a, err := analyzer.New(ctx, logger) + if err != nil { + logger.Error("failed to create analyzer", zap.Error(err)) + os.Exit(1) + } + + singlechecker.Main(a) +} diff --git a/cmd/lint-panics/utils/utils.go b/cmd/lint-panics/utils/utils.go new file mode 100644 index 00000000000..e0f69588a29 --- /dev/null +++ b/cmd/lint-panics/utils/utils.go @@ -0,0 +1,39 @@ +package utils + +import ( + "strconv" + + "fmt" + "os" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func URI(path string, line int) string { + return path + ":" + strconv.Itoa(line) +} + +func ZapURI(path string, line int) zap.Field { + return zap.Field{ + Type: zapcore.StringType, + Key: "uri", + String: URI(path, line), + } +} + +func BuildLogger(level zapcore.Level) *zap.Logger { + // Initialize logger with colors + loggerConfig := zap.NewDevelopmentConfig() + loggerConfig.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + loggerConfig.Level = zap.NewAtomicLevelAt(level) + loggerConfig.Development = false + loggerConfig.DisableStacktrace = true + logger, err := loggerConfig.Build() + if err != nil { + fmt.Printf("failed to initialize logger: %s", err.Error()) + os.Exit(1) + } + + return logger.Named("main") +} diff --git a/cmd/status-cli/util.go b/cmd/status-cli/util.go index 754f49cfe6e..6cbc4736cf9 100644 --- a/cmd/status-cli/util.go +++ b/cmd/status-cli/util.go @@ -81,7 +81,7 @@ func start(p StartParams, logger *zap.SugaredLogger) (*StatusCLI, error) { } waku := backend.StatusNode().WakuV2Service() telemetryClient := telemetry.NewClient(telemetryLogger, p.TelemetryURL, backend.SelectedAccountKeyID(), p.Name, "cli", telemetry.WithPeerID(waku.PeerID().String())) - go telemetryClient.Start(context.Background()) + telemetryClient.Start(context.Background()) backend.StatusNode().WakuV2Service().SetStatusTelemetryClient(telemetryClient) } wakuAPI := wakuv2ext.NewPublicAPI(wakuService) diff --git 
a/go.mod b/go.mod index b3abfac22bc..b04f014ec00 100644 --- a/go.mod +++ b/go.mod @@ -99,6 +99,9 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.7 github.com/yeqown/go-qrcode/v2 v2.2.1 github.com/yeqown/go-qrcode/writer/standard v1.2.1 + go.lsp.dev/jsonrpc2 v0.10.0 + go.lsp.dev/protocol v0.12.0 + go.lsp.dev/uri v0.3.0 go.uber.org/mock v0.4.0 go.uber.org/multierr v1.11.0 golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa @@ -253,6 +256,8 @@ require ( github.com/russolsen/ohyeah v0.0.0-20160324131710-f4938c005315 // indirect github.com/russolsen/same v0.0.0-20160222130632-f089df61f51d // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/segmentio/asm v1.1.3 // indirect + github.com/segmentio/encoding v0.3.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -275,6 +280,7 @@ require ( github.com/yeqown/reedsolomon v1.0.0 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect + go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect diff --git a/go.sum b/go.sum index 9112614af33..d489fe7ef1b 100644 --- a/go.sum +++ b/go.sum @@ -1936,6 +1936,10 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/segmentio/asm v1.1.3 h1:WM03sfUOENvvKexOLp+pCqgb/WDjsi7EK8gIsICtzhc= +github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg= +github.com/segmentio/encoding v0.3.4 h1:WM4IBnxH8B9TakiM2QD5LyNl9JSndh88QbHqVC+Pauc= +github.com/segmentio/encoding v0.3.4/go.mod h1:n0JeuIqEQrQoPDGsjo8UNd1iA0U8d8+oHAA4E3G3OxM= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -2221,6 +2225,14 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.lsp.dev/jsonrpc2 v0.10.0 h1:Pr/YcXJoEOTMc/b6OTmcR1DPJ3mSWl/SWiU1Cct6VmI= +go.lsp.dev/jsonrpc2 v0.10.0/go.mod h1:fmEzIdXPi/rf6d4uFcayi8HpFP1nBF99ERP1htC72Ac= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 h1:hCzQgh6UcwbKgNSRurYWSqh8MufqRRPODRBblutn4TE= +go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2/go.mod h1:gtSHRuYfbCT0qnbLnovpie/WEmqyJ7T4n6VXiFMBtcw= +go.lsp.dev/protocol v0.12.0 h1:tNprUI9klQW5FAFVM4Sa+AbPFuVQByWhP1ttNUAjIWg= +go.lsp.dev/protocol v0.12.0/go.mod h1:Qb11/HgZQ72qQbeyPfJbu3hZBH23s1sr4st8czGeDMQ= +go.lsp.dev/uri v0.3.0 
h1:KcZJmh6nFIBeJzTugn5JTU6OOyG0lDOo3R9KwTxTYbo= +go.lsp.dev/uri v0.3.0/go.mod h1:P5sbO1IQR+qySTWOCnhnK7phBx+W3zbLqSMDJNTw88I= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -2677,6 +2689,7 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211023085530-d6a326fbbf70/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/healthmanager/blockchain_health_manager.go b/healthmanager/blockchain_health_manager.go index 7fcdf5d6d92..234ac32d2fb 100644 --- a/healthmanager/blockchain_health_manager.go +++ b/healthmanager/blockchain_health_manager.go @@ -4,6 +4,7 @@ import ( "context" "sync" + status_common "github.com/status-im/status-go/common" "github.com/status-im/status-go/healthmanager/aggregator" "github.com/status-im/status-go/healthmanager/rpcstatus" ) @@ -72,6 +73,7 @@ func (b *BlockchainHealthManager) RegisterProvidersHealthManager(ctx context.Con statusCh := phm.Subscribe() b.wg.Add(1) go func(phm *ProvidersHealthManager, statusCh chan struct{}, providerCtx context.Context) { + defer status_common.LogOnPanic() defer func() { phm.Unsubscribe(statusCh) b.wg.Done() diff --git a/metrics/metrics.go b/metrics/metrics.go index be04df702c7..35a2c838866 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -11,6 +11,8 @@ import ( prom "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/status-im/status-go/common" ) // Server runs and controls a HTTP pprof interface. @@ -55,5 +57,6 @@ func Handler(reg metrics.Registry) http.Handler { // Listen starts the HTTP server in the background. 
func (p *Server) Listen() { + defer common.LogOnPanic() log.Info("metrics server stopped", "err", p.server.ListenAndServe()) } diff --git a/protocol/encryption/publisher/publisher.go b/protocol/encryption/publisher/publisher.go index e7b58707499..5c450cf69a2 100644 --- a/protocol/encryption/publisher/publisher.go +++ b/protocol/encryption/publisher/publisher.go @@ -73,6 +73,7 @@ func (p *Publisher) Stop() { } func (p *Publisher) tickerLoop() { + defer gocommon.LogOnPanic() ticker := time.NewTicker(tickerInterval * time.Second) go func() { diff --git a/protocol/messenger.go b/protocol/messenger.go index d48dca99885..7d9c455f8c0 100644 --- a/protocol/messenger.go +++ b/protocol/messenger.go @@ -581,7 +581,7 @@ func NewMessenger( if c.wakuService != nil { c.wakuService.SetStatusTelemetryClient(telemetryClient) } - go telemetryClient.Start(ctx) + telemetryClient.Start(ctx) } messenger = &Messenger{ @@ -916,7 +916,7 @@ func (m *Messenger) Start() (*MessengerResponse, error) { for _, c := range controlledCommunities { if c.Joined() && c.HasTokenPermissions() { - go m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false) + m.communitiesManager.StartMembersReevaluationLoop(c.ID(), false) } } diff --git a/protocol/messenger_store_node_request_manager.go b/protocol/messenger_store_node_request_manager.go index 84401003f2f..f0791b2bc3f 100644 --- a/protocol/messenger_store_node_request_manager.go +++ b/protocol/messenger_store_node_request_manager.go @@ -500,6 +500,8 @@ func (r *storeNodeRequest) shouldFetchNextPage(envelopesCount int) (bool, uint32 } func (r *storeNodeRequest) routine() { + defer gocommon.LogOnPanic() + r.manager.logger.Info("starting store node request", zap.Any("requestID", r.requestID), zap.String("pubsubTopic", r.pubsubTopic), diff --git a/rpc/client.go b/rpc/client.go index b492a84b440..77a44e79b14 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -188,6 +188,7 @@ func (c *Client) Stop() { } func (c *Client) monitorHealth(ctx context.Context, statusCh chan struct{}) { + defer appCommon.LogOnPanic() sendFullStatusEventFunc := func() { blockchainStatus := c.healthMgr.GetFullStatus() encodedMessage, err := json.Marshal(blockchainStatus) diff --git a/services/wallet/api.go b/services/wallet/api.go index 93402387387..22ca140aaaf 100644 --- a/services/wallet/api.go +++ b/services/wallet/api.go @@ -18,6 +18,7 @@ import ( signercore "github.com/ethereum/go-ethereum/signer/core/apitypes" abi_spec "github.com/status-im/status-go/abi-spec" "github.com/status-im/status-go/account" + status_common "github.com/status-im/status-go/common" statusErrors "github.com/status-im/status-go/errors" "github.com/status-im/status-go/eth-node/crypto" "github.com/status-im/status-go/eth-node/types" @@ -779,6 +780,7 @@ func (api *API) BuildTransactionsFromRoute(ctx context.Context, buildInputParams log.Debug("[WalletAPI::BuildTransactionsFromRoute] builds transactions from the generated best route", "uuid", buildInputParams.Uuid) go func() { + defer status_common.LogOnPanic() api.router.StopSuggestedRoutesAsyncCalculation() var err error @@ -841,6 +843,7 @@ func (api *API) ProceedWithTransactionsSignatures(ctx context.Context, signature func (api *API) SendRouterTransactionsWithSignatures(ctx context.Context, sendInputParams *requests.RouterSendTransactionsParams) { log.Debug("[WalletAPI:: SendRouterTransactionsWithSignatures] sign with signatures and send") go func() { + defer status_common.LogOnPanic() var ( err error @@ -927,6 +930,7 @@ 
func (api *API) SendRouterTransactionsWithSignatures(ctx context.Context, sendIn chainIDs = append(chainIDs, tx.FromChain) addresses = append(addresses, common.Address(tx.FromAddress)) go func(chainId uint64, txHash common.Hash) { + defer status_common.LogOnPanic() err = api.s.transactionManager.WatchTransaction(context.Background(), chainId, txHash) if err != nil { return diff --git a/services/wallet/balance/ttl_cache.go b/services/wallet/balance/ttl_cache.go index a438658bb2d..596bec390e1 100644 --- a/services/wallet/balance/ttl_cache.go +++ b/services/wallet/balance/ttl_cache.go @@ -8,6 +8,7 @@ import ( "github.com/jellydator/ttlcache/v3" "github.com/ethereum/go-ethereum/log" + "github.com/status-im/status-go/common" ) var ( @@ -56,7 +57,10 @@ func (c *ttlCache[K, V]) init() { c.cache.OnEviction(func(ctx context.Context, reason ttlcache.EvictionReason, item *ttlcache.Item[K, V]) { log.Debug("Evicting item from balance/nonce cache", "reason", reason, "key", item.Key, "value", item.Value) }) - go c.cache.Start() // starts automatic expired item deletion + go func() { // starts automatic expired item deletion + defer common.LogOnPanic() + c.cache.Start() + }() } //nolint:golint,unused // linter does not detect using it via reflect diff --git a/services/wallet/common/mock/feed_subscription.go b/services/wallet/common/mock/feed_subscription.go index b508ded5fe3..3842b1911ea 100644 --- a/services/wallet/common/mock/feed_subscription.go +++ b/services/wallet/common/mock/feed_subscription.go @@ -4,6 +4,7 @@ import ( "time" "github.com/ethereum/go-ethereum/event" + "github.com/status-im/status-go/common" "github.com/status-im/status-go/services/wallet/walletevent" ) @@ -20,6 +21,7 @@ func NewFeedSubscription(feed *event.Feed) *FeedSubscription { subscription := feed.Subscribe(events) go func() { + defer common.LogOnPanic() <-done subscription.Unsubscribe() close(events) diff --git a/vendor/github.com/segmentio/asm/LICENSE b/vendor/github.com/segmentio/asm/LICENSE new file mode 100644 index 00000000000..29e1ab6b05f --- /dev/null +++ b/vendor/github.com/segmentio/asm/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
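For readers unfamiliar with the convention being rolled out in the LogOnPanic hunks earlier in this patch: cmd/lint-panics requires every goroutine to begin with defer LogOnPanic(), so a panic inside the goroutine is recovered and logged instead of silently terminating the process. The sketch below is illustrative only; logOnPanic here is a stand-in, and status-go's actual common.LogOnPanic is assumed to behave along these lines (recover, then log the panic value and stack), possibly with extra details such as re-panicking or metrics reporting.

package main

import (
	"fmt"
	"runtime/debug"
	"time"
)

// logOnPanic recovers a panic raised in the current goroutine and logs the
// panic value together with its stack trace, so the failure is observable
// without taking down the whole process.
func logOnPanic() {
	if r := recover(); r != nil {
		fmt.Printf("panic in goroutine: %v\n%s", r, debug.Stack())
	}
}

// safeWorker follows the convention checked by cmd/lint-panics: the deferred
// panic handler is the first statement of the function run as a goroutine.
func safeWorker() {
	defer logOnPanic()
	panic("boom")
}

func main() {
	go safeWorker()
	time.Sleep(time.Second) // crude synchronization, good enough for a demo
	fmt.Println("process survived the goroutine panic")
}

Running this prints the panic value and its stack, followed by the final line, instead of crashing; without the deferred handler the unrecovered panic would terminate the whole program.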
diff --git a/vendor/github.com/segmentio/asm/ascii/ascii.go b/vendor/github.com/segmentio/asm/ascii/ascii.go new file mode 100644 index 00000000000..4805146dd6b --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/ascii.go @@ -0,0 +1,53 @@ +package ascii + +import _ "github.com/segmentio/asm/cpu" + +// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord +const ( + hasLessConstL64 = (^uint64(0)) / 255 + hasLessConstR64 = hasLessConstL64 * 128 + + hasLessConstL32 = (^uint32(0)) / 255 + hasLessConstR32 = hasLessConstL32 * 128 + + hasMoreConstL64 = (^uint64(0)) / 255 + hasMoreConstR64 = hasMoreConstL64 * 128 + + hasMoreConstL32 = (^uint32(0)) / 255 + hasMoreConstR32 = hasMoreConstL32 * 128 +) + +func hasLess64(x, n uint64) bool { + return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0 +} + +func hasLess32(x, n uint32) bool { + return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0 +} + +func hasMore64(x, n uint64) bool { + return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0 +} + +func hasMore32(x, n uint32) bool { + return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0 +} + +var lowerCase = [256]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, + 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, + 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, + 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold.go b/vendor/github.com/segmentio/asm/ascii/equal_fold.go new file mode 100644 index 00000000000..d90d8cafccf --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold.go @@ -0,0 +1,30 @@ +package ascii + +import ( + "github.com/segmentio/asm/internal/unsafebytes" +) + +// EqualFold is a version of bytes.EqualFold designed to work on ASCII input +// instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFold(a, b []byte) bool { + return EqualFoldString(unsafebytes.String(a), unsafebytes.String(b)) +} + +func HasPrefixFold(s, prefix []byte) bool { + return len(s) >= len(prefix) && EqualFold(s[:len(prefix)], prefix) +} + +func HasSuffixFold(s, suffix []byte) bool { + return len(s) >= len(suffix) && EqualFold(s[len(s)-len(suffix):], suffix) +} + +func HasPrefixFoldString(s, prefix string) bool { + return len(s) >= len(prefix) && EqualFoldString(s[:len(prefix)], prefix) +} + +func HasSuffixFoldString(s, suffix string) bool { + return len(s) >= len(suffix) && EqualFoldString(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go new file mode 100644 index 00000000000..07cf6cdb486 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.go @@ -0,0 +1,13 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. +func EqualFoldString(a string, b string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s new file mode 100644 index 00000000000..34495a622b4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_amd64.s @@ -0,0 +1,304 @@ +// Code generated by command: go run equal_fold_asm.go -pkg ascii -out ../ascii/equal_fold_amd64.s -stubs ../ascii/equal_fold_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func EqualFoldString(a string, b string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·EqualFoldString(SB), NOSPLIT, $0-33 + MOVQ a_base+0(FP), CX + MOVQ a_len+8(FP), DX + MOVQ b_base+16(FP), BX + CMPQ DX, b_len+24(FP) + JNE done + XORQ AX, AX + CMPQ DX, $0x10 + JB init_x86 + BTL $0x08, github.com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +init_x86: + LEAQ github.com∕segmentio∕asm∕ascii·lowerCase+0(SB), R9 + XORL SI, SI + +cmp8: + CMPQ DX, $0x08 + JB cmp7 + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + MOVBLZX 7(CX)(AX*1), DI + MOVBLZX 7(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + JNE done + ADDQ $0x08, AX + SUBQ $0x08, DX + JMP cmp8 + +cmp7: + CMPQ DX, $0x07 + JB cmp6 + MOVBLZX 6(CX)(AX*1), DI + MOVBLZX 6(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp6: + CMPQ DX, $0x06 + JB cmp5 + MOVBLZX 5(CX)(AX*1), DI + MOVBLZX 5(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp5: + CMPQ DX, $0x05 + JB cmp4 + MOVBLZX 4(CX)(AX*1), DI + MOVBLZX 4(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp4: + CMPQ DX, $0x04 + JB cmp3 + MOVBLZX 3(CX)(AX*1), DI + MOVBLZX 3(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp3: + CMPQ DX, $0x03 + JB cmp2 + MOVBLZX 2(CX)(AX*1), DI + MOVBLZX 2(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp2: + CMPQ DX, $0x02 + JB cmp1 + MOVBLZX 1(CX)(AX*1), DI + MOVBLZX 1(BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +cmp1: + CMPQ DX, $0x01 + JB success + MOVBLZX (CX)(AX*1), DI + MOVBLZX (BX)(AX*1), R8 + MOVB (R9)(DI*1), DI + XORB (R9)(R8*1), DI + ORB DI, SI + +done: + SETEQ ret+32(FP) + RET + +success: + MOVB $0x01, ret+32(FP) + RET + +init_avx: + MOVB $0x20, SI + PINSRB $0x00, SI, X12 + VPBROADCASTB X12, Y12 + MOVB $0x1f, SI + PINSRB $0x00, SI, X13 + VPBROADCASTB X13, Y13 + MOVB $0x9a, SI + PINSRB $0x00, SI, X14 + VPBROADCASTB X14, Y14 + MOVB $0x01, SI + PINSRB $0x00, SI, X15 + VPBROADCASTB X15, Y15 + +cmp128: + CMPQ DX, $0x80 + JB cmp64 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU 64(CX)(AX*1), Y2 + VMOVDQU 96(CX)(AX*1), Y3 + VMOVDQU (BX)(AX*1), Y4 + VMOVDQU 32(BX)(AX*1), Y5 + VMOVDQU 64(BX)(AX*1), Y6 + VMOVDQU 96(BX)(AX*1), Y7 + VXORPD Y0, Y4, Y4 + VPCMPEQB Y12, Y4, Y8 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y8, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y4, Y0, Y0 + VXORPD Y1, Y5, Y5 + VPCMPEQB Y12, Y5, Y9 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y9, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y5, Y1, Y1 + VXORPD Y2, Y6, Y6 + VPCMPEQB Y12, Y6, Y10 + VORPD Y12, Y2, Y2 + VPADDB Y13, 
Y2, Y2 + VPCMPGTB Y2, Y14, Y2 + VPAND Y10, Y2, Y2 + VPAND Y15, Y2, Y2 + VPSLLW $0x05, Y2, Y2 + VPCMPEQB Y6, Y2, Y2 + VXORPD Y3, Y7, Y7 + VPCMPEQB Y12, Y7, Y11 + VORPD Y12, Y3, Y3 + VPADDB Y13, Y3, Y3 + VPCMPGTB Y3, Y14, Y3 + VPAND Y11, Y3, Y3 + VPAND Y15, Y3, Y3 + VPSLLW $0x05, Y3, Y3 + VPCMPEQB Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + JMP cmp128 + +cmp64: + CMPQ DX, $0x40 + JB cmp32 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU 32(CX)(AX*1), Y1 + VMOVDQU (BX)(AX*1), Y2 + VMOVDQU 32(BX)(AX*1), Y3 + VXORPD Y0, Y2, Y2 + VPCMPEQB Y12, Y2, Y4 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y4, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y2, Y0, Y0 + VXORPD Y1, Y3, Y3 + VPCMPEQB Y12, Y3, Y5 + VORPD Y12, Y1, Y1 + VPADDB Y13, Y1, Y1 + VPCMPGTB Y1, Y14, Y1 + VPAND Y5, Y1, Y1 + VPAND Y15, Y1, Y1 + VPSLLW $0x05, Y1, Y1 + VPCMPEQB Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp32: + CMPQ DX, $0x20 + JB cmp16 + VMOVDQU (CX)(AX*1), Y0 + VMOVDQU (BX)(AX*1), Y1 + VXORPD Y0, Y1, Y1 + VPCMPEQB Y12, Y1, Y2 + VORPD Y12, Y0, Y0 + VPADDB Y13, Y0, Y0 + VPCMPGTB Y0, Y14, Y0 + VPAND Y2, Y0, Y0 + VPAND Y15, Y0, Y0 + VPSLLW $0x05, Y0, Y0 + VPCMPEQB Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, DX + VPMOVMSKB Y0, SI + XORL $0xffffffff, SI + JNE done + +cmp16: + CMPQ DX, $0x10 + JLE cmp_tail + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, DX + VPMOVMSKB X0, SI + XORL $0x0000ffff, SI + JNE done + +cmp_tail: + SUBQ $0x10, DX + ADDQ DX, AX + VMOVDQU (CX)(AX*1), X0 + VMOVDQU (BX)(AX*1), X1 + VXORPD X0, X1, X1 + VPCMPEQB X12, X1, X2 + VORPD X12, X0, X0 + VPADDB X13, X0, X0 + VPCMPGTB X0, X14, X0 + VPAND X2, X0, X0 + VPAND X15, X0, X0 + VPSLLW $0x05, X0, X0 + VPCMPEQB X1, X0, X0 + VPMOVMSKB X0, AX + XORL $0x0000ffff, AX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go new file mode 100644 index 00000000000..1ae5a13a5b8 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/equal_fold_default.go @@ -0,0 +1,60 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +// EqualFoldString is a version of strings.EqualFold designed to work on ASCII +// input instead of UTF-8. +// +// When the program has guarantees that the input is composed of ASCII +// characters only, it allows for greater optimizations. 
+func EqualFoldString(a, b string) bool { + if len(a) != len(b) { + return false + } + + var cmp byte + + for len(a) >= 8 { + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + cmp |= lowerCase[a[7]] ^ lowerCase[b[7]] + + if cmp != 0 { + return false + } + + a = a[8:] + b = b[8:] + } + + switch len(a) { + case 7: + cmp |= lowerCase[a[6]] ^ lowerCase[b[6]] + fallthrough + case 6: + cmp |= lowerCase[a[5]] ^ lowerCase[b[5]] + fallthrough + case 5: + cmp |= lowerCase[a[4]] ^ lowerCase[b[4]] + fallthrough + case 4: + cmp |= lowerCase[a[3]] ^ lowerCase[b[3]] + fallthrough + case 3: + cmp |= lowerCase[a[2]] ^ lowerCase[b[2]] + fallthrough + case 2: + cmp |= lowerCase[a[1]] ^ lowerCase[b[1]] + fallthrough + case 1: + cmp |= lowerCase[a[0]] ^ lowerCase[b[0]] + } + + return cmp == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid.go b/vendor/github.com/segmentio/asm/ascii/valid.go new file mode 100644 index 00000000000..a5168ef5951 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// Valid returns true if b contains only ASCII characters. +func Valid(b []byte) bool { + return ValidString(unsafebytes.String(b)) +} + +// ValidBytes returns true if b is an ASCII character. +func ValidByte(b byte) bool { + return b <= 0x7f +} + +// ValidBytes returns true if b is an ASCII character. +func ValidRune(r rune) bool { + return r <= 0x7f +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go new file mode 100644 index 00000000000..72dc7b435de --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// ValidString returns true if s contains only ASCII characters. +func ValidString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s new file mode 100644 index 00000000000..0214b0ce9d0 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_amd64.s @@ -0,0 +1,132 @@ +// Code generated by command: go run valid_asm.go -pkg ascii -out ../ascii/valid_amd64.s -stubs ../ascii/valid_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·ValidString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + MOVQ $0x8080808080808080, DX + CMPQ CX, $0x10 + JB cmp8 + BTL $0x08, github.com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +cmp8: + CMPQ CX, $0x08 + JB cmp4 + TESTQ DX, (AX) + JNZ invalid + ADDQ $0x08, AX + SUBQ $0x08, CX + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + TESTL $0x80808080, (AX) + JNZ invalid + ADDQ $0x04, AX + SUBQ $0x04, CX + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), CX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL CX, AX + TESTL $0x80808080, AX + JMP done + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + TESTW $0x8080, (AX) + JMP done + +cmp1: + CMPQ CX, $0x00 + JE done + TESTB $0x80, (AX) + +done: + SETEQ ret+16(FP) + RET + +invalid: + MOVB $0x00, ret+16(FP) + RET + +init_avx: + PINSRQ $0x00, DX, X4 + VPBROADCASTQ X4, Y4 + +cmp256: + CMPQ CX, $0x00000100 + JB cmp128 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VMOVDQU 128(AX), Y2 + VPOR 160(AX), Y2, Y2 + VMOVDQU 192(AX), Y3 + VPOR 224(AX), Y3, Y3 + VPOR Y1, Y0, Y0 + VPOR Y3, Y2, Y2 + VPOR Y2, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x00000100, AX + SUBQ $0x00000100, CX + JMP cmp256 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VMOVDQU 64(AX), Y1 + VPOR 96(AX), Y1, Y1 + VPOR Y1, Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x80, AX + SUBQ $0x80, CX + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + VPOR 32(AX), Y0, Y0 + VPTEST Y0, Y4 + JNZ invalid + ADDQ $0x40, AX + SUBQ $0x40, CX + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VPTEST (AX), Y4 + JNZ invalid + ADDQ $0x20, AX + SUBQ $0x20, CX + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VPTEST (AX), X4 + JNZ invalid + ADDQ $0x10, AX + SUBQ $0x10, CX + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VPTEST (AX), X4 + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_default.go b/vendor/github.com/segmentio/asm/ascii/valid_default.go new file mode 100644 index 00000000000..715a090d495 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_default.go @@ -0,0 +1,48 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import ( + "unsafe" +) + +// ValidString returns true if s contains only ASCII characters. 
+func ValidString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if (*(*uint64)(unsafe.Pointer(uintptr(p) + i)) & 0x8080808080808080) != 0 { + return false + } + i += 8 + } + + if i+4 <= n { + if (*(*uint32)(unsafe.Pointer(uintptr(p) + i)) & 0x80808080) != 0 { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = uint32(*(*uint16)(p)) + case 1: + x = uint32(*(*uint8)(p)) + default: + return true + } + return (x & 0x80808080) == 0 +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print.go b/vendor/github.com/segmentio/asm/ascii/valid_print.go new file mode 100644 index 00000000000..aa0db7f6551 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print.go @@ -0,0 +1,18 @@ +package ascii + +import "github.com/segmentio/asm/internal/unsafebytes" + +// ValidPrint returns true if b contains only printable ASCII characters. +func ValidPrint(b []byte) bool { + return ValidPrintString(unsafebytes.String(b)) +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintByte(b byte) bool { + return 0x20 <= b && b <= 0x7e +} + +// ValidPrintBytes returns true if b is an ASCII character. +func ValidPrintRune(r rune) bool { + return 0x20 <= r && r <= 0x7e +} diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go new file mode 100644 index 00000000000..b14626666d4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.go @@ -0,0 +1,9 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package ascii + +// ValidPrintString returns true if s contains only printable ASCII characters. +func ValidPrintString(s string) bool diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s new file mode 100644 index 00000000000..bc2e20a23b6 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_amd64.s @@ -0,0 +1,185 @@ +// Code generated by command: go run valid_print_asm.go -pkg ascii -out ../ascii/valid_print_amd64.s -stubs ../ascii/valid_print_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func ValidPrintString(s string) bool +// Requires: AVX, AVX2, SSE4.1 +TEXT ·ValidPrintString(SB), NOSPLIT, $0-17 + MOVQ s_base+0(FP), AX + MOVQ s_len+8(FP), CX + CMPQ CX, $0x10 + JB init_x86 + BTL $0x08, github.com∕segmentio∕asm∕cpu·X86+0(SB) + JCS init_avx + +init_x86: + CMPQ CX, $0x08 + JB cmp4 + MOVQ $0xdfdfdfdfdfdfdfe0, DX + MOVQ $0x0101010101010101, BX + MOVQ $0x8080808080808080, SI + +cmp8: + MOVQ (AX), DI + MOVQ DI, R8 + LEAQ (DI)(DX*1), R9 + NOTQ R8 + ANDQ R8, R9 + LEAQ (DI)(BX*1), R8 + ORQ R8, DI + ORQ R9, DI + ADDQ $0x08, AX + SUBQ $0x08, CX + TESTQ SI, DI + JNE done + CMPQ CX, $0x08 + JB cmp4 + JMP cmp8 + +cmp4: + CMPQ CX, $0x04 + JB cmp3 + MOVL (AX), DX + MOVL DX, BX + LEAL 3755991008(DX), SI + NOTL BX + ANDL BX, SI + LEAL 16843009(DX), BX + ORL BX, DX + ORL SI, DX + ADDQ $0x04, AX + SUBQ $0x04, CX + TESTL $0x80808080, DX + JNE done + +cmp3: + CMPQ CX, $0x03 + JB cmp2 + MOVWLZX (AX), DX + MOVBLZX 2(AX), AX + SHLL $0x10, AX + ORL DX, AX + ORL $0x20000000, AX + JMP final + +cmp2: + CMPQ CX, $0x02 + JB cmp1 + MOVWLZX (AX), AX + ORL $0x20200000, AX + JMP final + +cmp1: + CMPQ CX, $0x00 + JE done + MOVBLZX (AX), AX + ORL $0x20202000, AX + +final: + MOVL AX, CX + LEAL 3755991008(AX), DX + NOTL CX + ANDL CX, DX + LEAL 16843009(AX), CX + ORL CX, AX + ORL DX, AX + TESTL $0x80808080, AX + +done: + SETEQ ret+16(FP) + RET + +init_avx: + MOVB $0x1f, DL + PINSRB $0x00, DX, X8 + VPBROADCASTB X8, Y8 + MOVB $0x7e, DL + PINSRB $0x00, DX, X9 + VPBROADCASTB X9, Y9 + +cmp128: + CMPQ CX, $0x80 + JB cmp64 + VMOVDQU (AX), Y0 + VMOVDQU 32(AX), Y1 + VMOVDQU 64(AX), Y2 + VMOVDQU 96(AX), Y3 + VPCMPGTB Y8, Y0, Y4 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y4, Y0, Y0 + VPCMPGTB Y8, Y1, Y5 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y5, Y1, Y1 + VPCMPGTB Y8, Y2, Y6 + VPCMPGTB Y9, Y2, Y2 + VPANDN Y6, Y2, Y2 + VPCMPGTB Y8, Y3, Y7 + VPCMPGTB Y9, Y3, Y3 + VPANDN Y7, Y3, Y3 + VPAND Y1, Y0, Y0 + VPAND Y3, Y2, Y2 + VPAND Y2, Y0, Y0 + ADDQ $0x80, AX + SUBQ $0x80, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + JMP cmp128 + +cmp64: + CMPQ CX, $0x40 + JB cmp32 + VMOVDQU (AX), Y0 + VMOVDQU 32(AX), Y1 + VPCMPGTB Y8, Y0, Y2 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y2, Y0, Y0 + VPCMPGTB Y8, Y1, Y3 + VPCMPGTB Y9, Y1, Y1 + VPANDN Y3, Y1, Y1 + VPAND Y1, Y0, Y0 + ADDQ $0x40, AX + SUBQ $0x40, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp32: + CMPQ CX, $0x20 + JB cmp16 + VMOVDQU (AX), Y0 + VPCMPGTB Y8, Y0, Y1 + VPCMPGTB Y9, Y0, Y0 + VPANDN Y1, Y0, Y0 + ADDQ $0x20, AX + SUBQ $0x20, CX + VPMOVMSKB Y0, DX + XORL $0xffffffff, DX + JNE done + +cmp16: + CMPQ CX, $0x10 + JLE cmp_tail + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + ADDQ $0x10, AX + SUBQ $0x10, CX + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JNE done + +cmp_tail: + SUBQ $0x10, CX + ADDQ CX, AX + VMOVDQU (AX), X0 + VPCMPGTB X8, X0, X1 + VPCMPGTB X9, X0, X0 + VPANDN X1, X0, X0 + VPMOVMSKB X0, DX + XORL $0x0000ffff, DX + JMP done diff --git a/vendor/github.com/segmentio/asm/ascii/valid_print_default.go b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go new file mode 100644 index 00000000000..c4dc748b083 --- /dev/null +++ b/vendor/github.com/segmentio/asm/ascii/valid_print_default.go @@ -0,0 +1,46 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package ascii + +import "unsafe" + +// ValidString returns true if s contains only printable ASCII characters. 
+func ValidPrintString(s string) bool { + p := *(*unsafe.Pointer)(unsafe.Pointer(&s)) + i := uintptr(0) + n := uintptr(len(s)) + + for i+8 <= n { + if hasLess64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore64(*(*uint64)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 8 + } + + if i+4 <= n { + if hasLess32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x20) || hasMore32(*(*uint32)(unsafe.Pointer(uintptr(p) + i)), 0x7e) { + return false + } + i += 4 + } + + if i == n { + return true + } + + p = unsafe.Pointer(uintptr(p) + i) + + var x uint32 + switch n - i { + case 3: + x = 0x20000000 | uint32(*(*uint16)(p)) | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + 2)))<<16 + case 2: + x = 0x20200000 | uint32(*(*uint16)(p)) + case 1: + x = 0x20202000 | uint32(*(*uint8)(p)) + default: + return true + } + return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e)) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64.go b/vendor/github.com/segmentio/asm/base64/base64.go new file mode 100644 index 00000000000..dd2128d4a95 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64.go @@ -0,0 +1,67 @@ +package base64 + +import ( + "encoding/base64" +) + +const ( + StdPadding rune = base64.StdPadding + NoPadding rune = base64.NoPadding + + encodeStd = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" + encodeURL = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_" + encodeIMAP = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+," + + letterRange = int8('Z' - 'A' + 1) +) + +// StdEncoding is the standard base64 encoding, as defined in RFC 4648. +var StdEncoding = NewEncoding(encodeStd) + +// URLEncoding is the alternate base64 encoding defined in RFC 4648. +// It is typically used in URLs and file names. +var URLEncoding = NewEncoding(encodeURL) + +// RawStdEncoding is the standard unpadded base64 encoding defined in RFC 4648 section 3.2. +// This is the same as StdEncoding but omits padding characters. +var RawStdEncoding = StdEncoding.WithPadding(NoPadding) + +// RawURLEncoding is the unpadded alternate base64 encoding defined in RFC 4648. +// This is the same as URLEncoding but omits padding characters. +var RawURLEncoding = URLEncoding.WithPadding(NoPadding) + +// NewEncoding returns a new padded Encoding defined by the given alphabet, +// which must be a 64-byte string that does not contain the padding character +// or CR / LF ('\r', '\n'). Unlike the standard library, the encoding alphabet +// cannot be abitrary, and it must follow one of the know standard encoding +// variants. +// +// Required alphabet values: +// * [0,26): characters 'A'..'Z' +// * [26,52): characters 'a'..'z' +// * [52,62): characters '0'..'9' +// Flexible alphabet value options: +// * RFC 4648, RFC 1421, RFC 2045, RFC 2152, RFC 4880: '+' and '/' +// * RFC 4648 URI: '-' and '_' +// * RFC 3501: '+' and ',' +// +// The resulting Encoding uses the default padding character ('='), which may +// be changed or disabled via WithPadding. The padding characters is urestricted, +// but it must be a character outside of the encoder alphabet. 
+func NewEncoding(encoder string) *Encoding { + if len(encoder) != 64 { + panic("encoding alphabet is not 64-bytes long") + } + + if _, ok := allowedEncoding[encoder]; !ok { + panic("non-standard encoding alphabets are not supported") + } + + return newEncoding(encoder) +} + +var allowedEncoding = map[string]struct{}{ + encodeStd: {}, + encodeURL: {}, + encodeIMAP: {}, +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_amd64.go b/vendor/github.com/segmentio/asm/base64/base64_amd64.go new file mode 100644 index 00000000000..e4940d781cf --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_amd64.go @@ -0,0 +1,160 @@ +//go:build amd64 && !purego +// +build amd64,!purego + +package base64 + +import ( + "encoding/base64" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/x86" + "github.com/segmentio/asm/internal/unsafebytes" +) + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding struct { + enc func(dst []byte, src []byte, lut *int8) (int, int) + enclut [32]int8 + + dec func(dst []byte, src []byte, lut *int8) (int, int) + declut [48]int8 + + base *base64.Encoding +} + +const ( + minEncodeLen = 28 + minDecodeLen = 45 +) + +func newEncoding(encoder string) *Encoding { + e := &Encoding{base: base64.NewEncoding(encoder)} + if cpu.X86.Has(x86.AVX2) { + e.enableEncodeAVX2(encoder) + e.enableDecodeAVX2(encoder) + } + return e +} + +func (e *Encoding) enableEncodeAVX2(encoder string) { + // Translate values 0..63 to the Base64 alphabet. There are five sets: + // + // From To Add Index Example + // [0..25] [65..90] +65 0 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [26..51] [97..122] +71 1 abcdefghijklmnopqrstuvwxyz + // [52..61] [48..57] -4 [2..11] 0123456789 + // [62] [43] -19 12 + + // [63] [47] -16 13 / + tab := [32]int8{int8(encoder[0]), int8(encoder[letterRange]) - letterRange} + for i, ch := range encoder[2*letterRange:] { + tab[2+i] = int8(ch) - 2*letterRange - int8(i) + } + + e.enc = encodeAVX2 + e.enclut = tab +} + +func (e *Encoding) enableDecodeAVX2(encoder string) { + c62, c63 := int8(encoder[62]), int8(encoder[63]) + url := c63 == '_' + if url { + c63 = '/' + } + + // Translate values from the Base64 alphabet using five sets. Values outside + // of these ranges are considered invalid: + // + // From To Add Index Example + // [47] [63] +16 1 / + // [43] [62] +19 2 + + // [48..57] [52..61] +4 3 0123456789 + // [65..90] [0..25] -65 4,5 ABCDEFGHIJKLMNOPQRSTUVWXYZ + // [97..122] [26..51] -71 6,7 abcdefghijklmnopqrstuvwxyz + tab := [48]int8{ + 0, 63 - c63, 62 - c62, 4, -65, -65, -71, -71, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x13, 0x1B, 0x1B, 0x1B, 0x1B, 0x1B, + } + tab[(c62&15)+16] = 0x1A + tab[(c63&15)+16] = 0x1A + + if url { + e.dec = decodeAVX2URI + } else { + e.dec = decodeAVX2 + } + e.declut = tab +} + +// WithPadding creates a duplicate Encoding updated with a specified padding +// character, or NoPadding to disable padding. The padding character must not +// be contained in the encoding alphabet, must not be '\r' or '\n', and must +// be no greater than '\xFF'. +func (enc Encoding) WithPadding(padding rune) *Encoding { + enc.base = enc.base.WithPadding(padding) + return &enc +} + +// Strict creates a duplicate encoding updated with strict decoding enabled. +// This requires that trailing padding bits are zero. 
+func (enc Encoding) Strict() *Encoding { + enc.base = enc.base.Strict() + return &enc +} + +// Encode encodes src using the defined encoding alphabet. +// This will write EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) Encode(dst, src []byte) { + if len(src) >= minEncodeLen && enc.enc != nil { + d, s := enc.enc(dst, src, &enc.enclut[0]) + dst = dst[d:] + src = src[s:] + } + enc.base.Encode(dst, src) +} + +// Encode encodes src using the encoding enc, writing +// EncodedLen(len(src)) bytes to dst. +func (enc *Encoding) EncodeToString(src []byte) string { + buf := make([]byte, enc.base.EncodedLen(len(src))) + enc.Encode(buf, src) + return string(buf) +} + +// EncodedLen calculates the base64-encoded byte length for a message +// of length n. +func (enc *Encoding) EncodedLen(n int) int { + return enc.base.EncodedLen(n) +} + +// Decode decodes src using the defined encoding alphabet. +// This will write DecodedLen(len(src)) bytes to dst and return the number of +// bytes written. +func (enc *Encoding) Decode(dst, src []byte) (n int, err error) { + var d, s int + if len(src) >= minDecodeLen && enc.dec != nil { + d, s = enc.dec(dst, src, &enc.declut[0]) + dst = dst[d:] + src = src[s:] + } + n, err = enc.base.Decode(dst, src) + n += d + return +} + +// DecodeString decodes the base64 encoded string s, returns the decoded +// value as bytes. +func (enc *Encoding) DecodeString(s string) ([]byte, error) { + src := unsafebytes.BytesOf(s) + dst := make([]byte, enc.base.DecodedLen(len(s))) + n, err := enc.Decode(dst, src) + return dst[:n], err +} + +// DecodedLen calculates the decoded byte length for a base64-encoded message +// of length n. +func (enc *Encoding) DecodedLen(n int) int { + return enc.base.DecodedLen(n) +} diff --git a/vendor/github.com/segmentio/asm/base64/base64_default.go b/vendor/github.com/segmentio/asm/base64/base64_default.go new file mode 100644 index 00000000000..f5d3d6470a8 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/base64_default.go @@ -0,0 +1,14 @@ +//go:build purego || !amd64 +// +build purego !amd64 + +package base64 + +import "encoding/base64" + +// An Encoding is a radix 64 encoding/decoding scheme, defined by a +// 64-character alphabet. +type Encoding = base64.Encoding + +func newEncoding(encoder string) *Encoding { + return base64.NewEncoding(encoder) +} diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.go b/vendor/github.com/segmentio/asm/base64/decode_amd64.go new file mode 100644 index 00000000000..1dae5b431b1 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) + +func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/decode_amd64.s b/vendor/github.com/segmentio/asm/base64/decode_amd64.s new file mode 100644 index 00000000000..cc6c779dae4 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/decode_amd64.s @@ -0,0 +1,144 @@ +// Code generated by command: go run decode_asm.go -pkg base64 -out ../base64/decode_amd64.s -stubs ../base64/decode_amd64.go. DO NOT EDIT. 
+ +//go:build !purego +// +build !purego + +#include "textflag.h" + +DATA b64_dec_lut_hi<>+0(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+8(SB)/8, $0x1010101010101010 +DATA b64_dec_lut_hi<>+16(SB)/8, $0x0804080402011010 +DATA b64_dec_lut_hi<>+24(SB)/8, $0x1010101010101010 +GLOBL b64_dec_lut_hi<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd1<>+0(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+8(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+16(SB)/8, $0x0140014001400140 +DATA b64_dec_madd1<>+24(SB)/8, $0x0140014001400140 +GLOBL b64_dec_madd1<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_madd2<>+0(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+8(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+16(SB)/8, $0x0001100000011000 +DATA b64_dec_madd2<>+24(SB)/8, $0x0001100000011000 +GLOBL b64_dec_madd2<>(SB), RODATA|NOPTR, $32 + +DATA b64_dec_shuf_lo<>+0(SB)/8, $0x0000000000000000 +DATA b64_dec_shuf_lo<>+8(SB)/8, $0x0600010200000000 +GLOBL b64_dec_shuf_lo<>(SB), RODATA|NOPTR, $16 + +DATA b64_dec_shuf<>+0(SB)/8, $0x090a040506000102 +DATA b64_dec_shuf<>+8(SB)/8, $0x000000000c0d0e08 +DATA b64_dec_shuf<>+16(SB)/8, $0x0c0d0e08090a0405 +DATA b64_dec_shuf<>+24(SB)/8, $0x0000000000000000 +GLOBL b64_dec_shuf<>(SB), RODATA|NOPTR, $32 + +// func decodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X8 + VPBROADCASTB X8, Y8 + XORQ CX, CX + XORQ BX, BX + VPXOR Y7, Y7, Y7 + VPERMQ $0x44, (SI), Y6 + VPERMQ $0x44, 16(SI), Y4 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y5 + +loop: + VMOVDQU (DX)(BX*1), Y0 + VPSRLD $0x04, Y0, Y2 + VPAND Y8, Y0, Y3 + VPSHUFB Y3, Y4, Y3 + VPAND Y8, Y2, Y2 + VPSHUFB Y2, Y5, Y9 + VPTEST Y9, Y3 + JNE done + VPCMPEQB Y8, Y0, Y3 + VPADDB Y3, Y2, Y2 + VPSHUFB Y2, Y6, Y2 + VPADDB Y0, Y2, Y0 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y0, Y0 + VPMADDWD b64_dec_madd2<>+0(SB), Y0, Y0 + VEXTRACTI128 $0x01, Y0, X1 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X1, X1 + VPSHUFB b64_dec_shuf<>+0(SB), Y0, Y0 + VPBLENDD $0x08, Y1, Y0, Y1 + VPBLENDD $0xc0, Y7, Y1, Y1 + VMOVDQU Y1, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ $0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +// func decodeAVX2URI(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·decodeAVX2URI(SB), NOSPLIT, $0-72 + MOVB $0x2f, AL + PINSRB $0x00, AX, X0 + VPBROADCASTB X0, Y0 + MOVB $0x5f, AL + PINSRB $0x00, AX, X1 + VPBROADCASTB X1, Y1 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x2f, CL + PINSRB $0x00, CX, X10 + VPBROADCASTB X10, Y10 + XORQ CX, CX + XORQ BX, BX + VPXOR Y9, Y9, Y9 + VPERMQ $0x44, (SI), Y8 + VPERMQ $0x44, 16(SI), Y6 + VMOVDQA b64_dec_lut_hi<>+0(SB), Y7 + +loop: + VMOVDQU (DX)(BX*1), Y2 + VPCMPEQB Y2, Y1, Y4 + VPBLENDVB Y4, Y0, Y2, Y2 + VPSRLD $0x04, Y2, Y4 + VPAND Y10, Y2, Y5 + VPSHUFB Y5, Y6, Y5 + VPAND Y10, Y4, Y4 + VPSHUFB Y4, Y7, Y11 + VPTEST Y11, Y5 + JNE done + VPCMPEQB Y10, Y2, Y5 + VPADDB Y5, Y4, Y4 + VPSHUFB Y4, Y8, Y4 + VPADDB Y2, Y4, Y2 + VPMADDUBSW b64_dec_madd1<>+0(SB), Y2, Y2 + VPMADDWD b64_dec_madd2<>+0(SB), Y2, Y2 + VEXTRACTI128 $0x01, Y2, X3 + VPSHUFB b64_dec_shuf_lo<>+0(SB), X3, X3 + VPSHUFB b64_dec_shuf<>+0(SB), Y2, Y2 + VPBLENDD $0x08, Y3, Y2, Y3 + VPBLENDD $0xc0, Y9, Y3, Y3 + VMOVDQU Y3, (AX)(CX*1) + ADDQ $0x18, CX + ADDQ $0x20, BX + SUBQ 
$0x20, DI + CMPQ DI, $0x2d + JB done + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.go b/vendor/github.com/segmentio/asm/base64/encode_amd64.go new file mode 100644 index 00000000000..c38060f7119 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.go @@ -0,0 +1,8 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package base64 + +func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) diff --git a/vendor/github.com/segmentio/asm/base64/encode_amd64.s b/vendor/github.com/segmentio/asm/base64/encode_amd64.s new file mode 100644 index 00000000000..2edd27aac88 --- /dev/null +++ b/vendor/github.com/segmentio/asm/base64/encode_amd64.s @@ -0,0 +1,88 @@ +// Code generated by command: go run encode_asm.go -pkg base64 -out ../base64/encode_amd64.s -stubs ../base64/encode_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func encodeAVX2(dst []byte, src []byte, lut *int8) (int, int) +// Requires: AVX, AVX2, SSE4.1 +TEXT ·encodeAVX2(SB), NOSPLIT, $0-72 + MOVQ dst_base+0(FP), AX + MOVQ src_base+24(FP), DX + MOVQ lut+48(FP), SI + MOVQ src_len+32(FP), DI + MOVB $0x33, CL + PINSRB $0x00, CX, X4 + VPBROADCASTB X4, Y4 + MOVB $0x19, CL + PINSRB $0x00, CX, X5 + VPBROADCASTB X5, Y5 + XORQ CX, CX + XORQ BX, BX + + // Load the 16-byte LUT into both lanes of the register + VPERMQ $0x44, (SI), Y3 + + // Load the first block using a mask to avoid potential fault + VMOVDQU b64_enc_load<>+0(SB), Y0 + VPMASKMOVD -4(DX)(BX*1), Y0, Y0 + +loop: + VPSHUFB b64_enc_shuf<>+0(SB), Y0, Y0 + VPAND b64_enc_mask1<>+0(SB), Y0, Y1 + VPSLLW $0x08, Y1, Y2 + VPSLLW $0x04, Y1, Y1 + VPBLENDW $0xaa, Y2, Y1, Y2 + VPAND b64_enc_mask2<>+0(SB), Y0, Y1 + VPMULHUW b64_enc_mult<>+0(SB), Y1, Y0 + VPOR Y0, Y2, Y0 + VPSUBUSB Y4, Y0, Y1 + VPCMPGTB Y5, Y0, Y2 + VPSUBB Y2, Y1, Y1 + VPSHUFB Y1, Y3, Y1 + VPADDB Y0, Y1, Y0 + VMOVDQU Y0, (AX)(CX*1) + ADDQ $0x20, CX + ADDQ $0x18, BX + SUBQ $0x18, DI + CMPQ DI, $0x20 + JB done + VMOVDQU -4(DX)(BX*1), Y0 + JMP loop + +done: + MOVQ CX, ret+56(FP) + MOVQ BX, ret1+64(FP) + VZEROUPPER + RET + +DATA b64_enc_load<>+0(SB)/8, $0x8000000000000000 +DATA b64_enc_load<>+8(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+16(SB)/8, $0x8000000080000000 +DATA b64_enc_load<>+24(SB)/8, $0x8000000080000000 +GLOBL b64_enc_load<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_shuf<>+0(SB)/8, $0x0809070805060405 +DATA b64_enc_shuf<>+8(SB)/8, $0x0e0f0d0e0b0c0a0b +DATA b64_enc_shuf<>+16(SB)/8, $0x0405030401020001 +DATA b64_enc_shuf<>+24(SB)/8, $0x0a0b090a07080607 +GLOBL b64_enc_shuf<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask1<>+0(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+8(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+16(SB)/8, $0x003f03f0003f03f0 +DATA b64_enc_mask1<>+24(SB)/8, $0x003f03f0003f03f0 +GLOBL b64_enc_mask1<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mask2<>+0(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+8(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+16(SB)/8, $0x0fc0fc000fc0fc00 +DATA b64_enc_mask2<>+24(SB)/8, $0x0fc0fc000fc0fc00 +GLOBL b64_enc_mask2<>(SB), RODATA|NOPTR, $32 + +DATA b64_enc_mult<>+0(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+8(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+16(SB)/8, $0x0400004004000040 +DATA b64_enc_mult<>+24(SB)/8, 
$0x0400004004000040 +GLOBL b64_enc_mult<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/cpu/arm/arm.go b/vendor/github.com/segmentio/asm/cpu/arm/arm.go new file mode 100644 index 00000000000..47c695a075f --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm/arm.go @@ -0,0 +1,80 @@ +package arm + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . "golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + SWP Feature = 1 << iota // SWP instruction support + HALF // Half-word load and store support + THUMB // ARM Thumb instruction set + BIT26 // Address space limited to 26-bits + FASTMUL // 32-bit operand, 64-bit result multiplication support + FPA // Floating point arithmetic support + VFP // Vector floating point support + EDSP // DSP Extensions support + JAVA // Java instruction set + IWMMXT // Intel Wireless MMX technology support + CRUNCH // MaverickCrunch context switching and handling + THUMBEE // Thumb EE instruction set + NEON // NEON instruction set + VFPv3 // Vector floating point version 3 support + VFPv3D16 // Vector floating point version 3 D8-D15 + TLS // Thread local storage support + VFPv4 // Vector floating point version 4 support + IDIVA // Integer divide instruction support in ARM mode + IDIVT // Integer divide instruction support in Thumb mode + VFPD32 // Vector floating point version 3 D15-D31 + LPAE // Large Physical Address Extensions + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(SWP, ARM.HasSWP) + cpu.set(HALF, ARM.HasHALF) + cpu.set(THUMB, ARM.HasTHUMB) + cpu.set(BIT26, ARM.Has26BIT) + cpu.set(FASTMUL, ARM.HasFASTMUL) + cpu.set(FPA, ARM.HasFPA) + cpu.set(VFP, ARM.HasVFP) + cpu.set(EDSP, ARM.HasEDSP) + cpu.set(JAVA, ARM.HasJAVA) + cpu.set(IWMMXT, ARM.HasIWMMXT) + cpu.set(CRUNCH, ARM.HasCRUNCH) + cpu.set(THUMBEE, ARM.HasTHUMBEE) + cpu.set(NEON, ARM.HasNEON) + cpu.set(VFPv3, ARM.HasVFPv3) + cpu.set(VFPv3D16, ARM.HasVFPv3D16) + cpu.set(TLS, ARM.HasTLS) + cpu.set(VFPv4, ARM.HasVFPv4) + cpu.set(IDIVA, ARM.HasIDIVA) + cpu.set(IDIVT, ARM.HasIDIVT) + cpu.set(VFPD32, ARM.HasVFPD32) + cpu.set(LPAE, ARM.HasLPAE) + cpu.set(EVTSTRM, ARM.HasEVTSTRM) + cpu.set(AES, ARM.HasAES) + cpu.set(PMULL, ARM.HasPMULL) + cpu.set(SHA1, ARM.HasSHA1) + cpu.set(SHA2, ARM.HasSHA2) + cpu.set(CRC32, ARM.HasCRC32) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go new file mode 100644 index 00000000000..0c5134c76ee --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/arm64/arm64.go @@ -0,0 +1,74 @@ +package arm64 + +import ( + "github.com/segmentio/asm/cpu/cpuid" + . 
"golang.org/x/sys/cpu" +) + +type CPU cpuid.CPU + +func (cpu CPU) Has(feature Feature) bool { + return cpuid.CPU(cpu).Has(cpuid.Feature(feature)) +} + +func (cpu *CPU) set(feature Feature, enable bool) { + (*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable) +} + +type Feature cpuid.Feature + +const ( + FP Feature = 1 << iota // Floating-point instruction set (always available) + ASIMD // Advanced SIMD (always available) + EVTSTRM // Event stream support + AES // AES hardware implementation + PMULL // Polynomial multiplication instruction set + SHA1 // SHA1 hardware implementation + SHA2 // SHA2 hardware implementation + CRC32 // CRC32 hardware implementation + ATOMICS // Atomic memory operation instruction set + FPHP // Half precision floating-point instruction set + ASIMDHP // Advanced SIMD half precision instruction set + CPUID // CPUID identification scheme registers + ASIMDRDM // Rounding double multiply add/subtract instruction set + JSCVT // Javascript conversion from floating-point to integer + FCMA // Floating-point multiplication and addition of complex numbers + LRCPC // Release Consistent processor consistent support + DCPOP // Persistent memory support + SHA3 // SHA3 hardware implementation + SM3 // SM3 hardware implementation + SM4 // SM4 hardware implementation + ASIMDDP // Advanced SIMD double precision instruction set + SHA512 // SHA512 hardware implementation + SVE // Scalable Vector Extensions + ASIMDFHM // Advanced SIMD multiplication FP16 to FP32 +) + +func ABI() CPU { + cpu := CPU(0) + cpu.set(FP, ARM64.HasFP) + cpu.set(ASIMD, ARM64.HasASIMD) + cpu.set(EVTSTRM, ARM64.HasEVTSTRM) + cpu.set(AES, ARM64.HasAES) + cpu.set(PMULL, ARM64.HasPMULL) + cpu.set(SHA1, ARM64.HasSHA1) + cpu.set(SHA2, ARM64.HasSHA2) + cpu.set(CRC32, ARM64.HasCRC32) + cpu.set(ATOMICS, ARM64.HasATOMICS) + cpu.set(FPHP, ARM64.HasFPHP) + cpu.set(ASIMDHP, ARM64.HasASIMDHP) + cpu.set(CPUID, ARM64.HasCPUID) + cpu.set(ASIMDRDM, ARM64.HasASIMDRDM) + cpu.set(JSCVT, ARM64.HasJSCVT) + cpu.set(FCMA, ARM64.HasFCMA) + cpu.set(LRCPC, ARM64.HasLRCPC) + cpu.set(DCPOP, ARM64.HasDCPOP) + cpu.set(SHA3, ARM64.HasSHA3) + cpu.set(SM3, ARM64.HasSM3) + cpu.set(SM4, ARM64.HasSM4) + cpu.set(ASIMDDP, ARM64.HasASIMDDP) + cpu.set(SHA512, ARM64.HasSHA512) + cpu.set(SVE, ARM64.HasSVE) + cpu.set(ASIMDFHM, ARM64.HasASIMDFHM) + return cpu +} diff --git a/vendor/github.com/segmentio/asm/cpu/cpu.go b/vendor/github.com/segmentio/asm/cpu/cpu.go new file mode 100644 index 00000000000..6ddf4973f55 --- /dev/null +++ b/vendor/github.com/segmentio/asm/cpu/cpu.go @@ -0,0 +1,22 @@ +// Pakage cpu provides APIs to detect CPU features available at runtime. +package cpu + +import ( + "github.com/segmentio/asm/cpu/arm" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +var ( + // X86 is the bitset representing the set of the x86 instruction sets are + // supported by the CPU. + X86 = x86.ABI() + + // ARM is the bitset representing which parts of the arm instruction sets + // are supported by the CPU. + ARM = arm.ABI() + + // ARM64 is the bitset representing which parts of the arm64 instruction + // sets are supported by the CPU. 
+	ARM64 = arm64.ABI()
+)
diff --git a/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
new file mode 100644
index 00000000000..0949d3d584d
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/cpuid/cpuid.go
@@ -0,0 +1,32 @@
+// Package cpuid provides generic types used to represent CPU features supported
+// by the architecture.
+package cpuid
+
+// CPU is a bitset of feature flags representing the capabilities of various CPU
+// architectures that this package provides optimized assembly routines for.
+//
+// The intent is to provide a stable ABI between the Go code that generates the
+// assembly, and the program that uses the library functions.
+type CPU uint64
+
+// Feature represents a single CPU feature.
+type Feature uint64
+
+const (
+	// None is a Feature value that has no CPU features enabled.
+	None Feature = 0
+	// All is a Feature value that has all CPU features enabled.
+	All Feature = 0xFFFFFFFFFFFFFFFF
+)
+
+func (cpu CPU) Has(feature Feature) bool {
+	return (Feature(cpu) & feature) == feature
+}
+
+func (cpu *CPU) Set(feature Feature, enabled bool) {
+	if enabled {
+		*cpu |= CPU(feature)
+	} else {
+		*cpu &= ^CPU(feature)
+	}
+}
diff --git a/vendor/github.com/segmentio/asm/cpu/x86/x86.go b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
new file mode 100644
index 00000000000..9e93537583d
--- /dev/null
+++ b/vendor/github.com/segmentio/asm/cpu/x86/x86.go
@@ -0,0 +1,76 @@
+package x86
+
+import (
+	"github.com/segmentio/asm/cpu/cpuid"
+	. "golang.org/x/sys/cpu"
+)
+
+type CPU cpuid.CPU
+
+func (cpu CPU) Has(feature Feature) bool {
+	return cpuid.CPU(cpu).Has(cpuid.Feature(feature))
+}
+
+func (cpu *CPU) set(feature Feature, enable bool) {
+	(*cpuid.CPU)(cpu).Set(cpuid.Feature(feature), enable)
+}
+
+type Feature cpuid.Feature
+
+const (
+	SSE                Feature = 1 << iota // SSE functions
+	SSE2                                   // P4 SSE functions
+	SSE3                                   // Prescott SSE3 functions
+	SSE41                                  // Penryn SSE4.1 functions
+	SSE42                                  // Nehalem SSE4.2 functions
+	SSE4A                                  // AMD Barcelona microarchitecture SSE4a instructions
+	SSSE3                                  // Conroe SSSE3 functions
+	AVX                                    // AVX functions
+	AVX2                                   // AVX2 functions
+	AVX512BF16                             // AVX-512 BFLOAT16 Instructions
+	AVX512BITALG                           // AVX-512 Bit Algorithms
+	AVX512BW                               // AVX-512 Byte and Word Instructions
+	AVX512CD                               // AVX-512 Conflict Detection Instructions
+	AVX512DQ                               // AVX-512 Doubleword and Quadword Instructions
+	AVX512ER                               // AVX-512 Exponential and Reciprocal Instructions
+	AVX512F                                // AVX-512 Foundation
+	AVX512IFMA                             // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF                               // AVX-512 Prefetch Instructions
+	AVX512VBMI                             // AVX-512 Vector Bit Manipulation Instructions
+	AVX512VBMI2                            // AVX-512 Vector Bit Manipulation Instructions, Version 2
+	AVX512VL                               // AVX-512 Vector Length Extensions
+	AVX512VNNI                             // AVX-512 Vector Neural Network Instructions
+	AVX512VP2INTERSECT                     // AVX-512 Intersect for D/Q
+	AVX512VPOPCNTDQ                        // AVX-512 Vector Population Count Doubleword and Quadword
+	CMOV                                   // Conditional move
+)
+
+func ABI() CPU {
+	cpu := CPU(0)
+	cpu.set(SSE, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have SSE?
+	cpu.set(SSE2, X86.HasSSE2)
+	cpu.set(SSE3, X86.HasSSE3)
+	cpu.set(SSE41, X86.HasSSE41)
+	cpu.set(SSE42, X86.HasSSE42)
+	cpu.set(SSE4A, false) // TODO: add upstream support in golang.org/x/sys/cpu?
+ cpu.set(SSSE3, X86.HasSSSE3) + cpu.set(AVX, X86.HasAVX) + cpu.set(AVX2, X86.HasAVX2) + cpu.set(AVX512BF16, X86.HasAVX512BF16) + cpu.set(AVX512BITALG, X86.HasAVX512BITALG) + cpu.set(AVX512BW, X86.HasAVX512BW) + cpu.set(AVX512CD, X86.HasAVX512CD) + cpu.set(AVX512DQ, X86.HasAVX512DQ) + cpu.set(AVX512ER, X86.HasAVX512ER) + cpu.set(AVX512F, X86.HasAVX512F) + cpu.set(AVX512IFMA, X86.HasAVX512IFMA) + cpu.set(AVX512PF, X86.HasAVX512PF) + cpu.set(AVX512VBMI, X86.HasAVX512VBMI) + cpu.set(AVX512VBMI2, X86.HasAVX512VBMI2) + cpu.set(AVX512VL, X86.HasAVX512VL) + cpu.set(AVX512VNNI, X86.HasAVX512VNNI) + cpu.set(AVX512VP2INTERSECT, false) // TODO: add upstream support in golang.org/x/sys/cpu? + cpu.set(AVX512VPOPCNTDQ, X86.HasAVX512VPOPCNTDQ) + cpu.set(CMOV, true) // TODO: golang.org/x/sys/cpu assumes all CPUs have CMOV? + return cpu +} diff --git a/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go new file mode 100644 index 00000000000..913c9cc68b0 --- /dev/null +++ b/vendor/github.com/segmentio/asm/internal/unsafebytes/unsafebytes.go @@ -0,0 +1,20 @@ +package unsafebytes + +import "unsafe" + +func Pointer(b []byte) *byte { + return *(**byte)(unsafe.Pointer(&b)) +} + +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +func BytesOf(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{str: s, cap: len(s)})) +} + +type sliceHeader struct { + str string + cap int +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset.go b/vendor/github.com/segmentio/asm/keyset/keyset.go new file mode 100644 index 00000000000..1943c5f7894 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset.go @@ -0,0 +1,40 @@ +package keyset + +import ( + "bytes" + + "github.com/segmentio/asm/cpu" + "github.com/segmentio/asm/cpu/arm64" + "github.com/segmentio/asm/cpu/x86" +) + +// New prepares a set of keys for use with Lookup. +// +// An optimized routine is used if the processor supports AVX instructions and +// the maximum length of any of the keys is less than or equal to 16. If New +// returns nil, this indicates that an optimized routine is not available, and +// the caller should use a fallback. +func New(keys [][]byte) []byte { + maxWidth, hasNullByte := checkKeys(keys) + if hasNullByte || maxWidth > 16 || !(cpu.X86.Has(x86.AVX) || cpu.ARM64.Has(arm64.ASIMD)) { + return nil + } + + set := make([]byte, len(keys)*16) + for i, k := range keys { + copy(set[i*16:], k) + } + return set +} + +func checkKeys(keys [][]byte) (maxWidth int, hasNullByte bool) { + for _, k := range keys { + if len(k) > maxWidth { + maxWidth = len(k) + } + if bytes.IndexByte(k, 0) >= 0 { + hasNullByte = true + } + } + return +} diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go new file mode 100644 index 00000000000..9554ee67cac --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.go @@ -0,0 +1,10 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. 
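+//
+// A usage sketch: the keyset is built by New, which pads each key to 16
+// bytes and may return nil when no optimized routine is available.
+//
+//	set := New([][]byte{[]byte("foo"), []byte("bar")})
+//	if set != nil {
+//		i := Lookup(set, []byte("bar")) // i == 1; len(keys) when absent
+//	}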
+func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s new file mode 100644 index 00000000000..e27d2c45eae --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_amd64.s @@ -0,0 +1,108 @@ +// Code generated by command: go run keyset_asm.go -pkg keyset -out ../keyset/keyset_amd64.s -stubs ../keyset/keyset_amd64.go. DO NOT EDIT. + +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +// Requires: AVX +TEXT ·Lookup(SB), NOSPLIT, $0-56 + MOVQ keyset_base+0(FP), AX + MOVQ keyset_len+8(FP), CX + SHRQ $0x04, CX + MOVQ key_base+24(FP), DX + MOVQ key_len+32(FP), BX + MOVQ key_cap+40(FP), SI + CMPQ BX, $0x10 + JA not_found + CMPQ SI, $0x10 + JB safe_load + +load: + VMOVUPS (DX), X0 + +prepare: + VPXOR X2, X2, X2 + VPCMPEQB X1, X1, X1 + LEAQ blend_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X3 + VPBLENDVB X3, X0, X2, X0 + XORQ DX, DX + MOVQ CX, BX + SHRQ $0x02, BX + SHLQ $0x02, BX + +bigloop: + CMPQ DX, BX + JE loop + VPCMPEQB (AX), X0, X8 + VPTEST X1, X8 + JCS done + VPCMPEQB 16(AX), X0, X9 + VPTEST X1, X9 + JCS found1 + VPCMPEQB 32(AX), X0, X10 + VPTEST X1, X10 + JCS found2 + VPCMPEQB 48(AX), X0, X11 + VPTEST X1, X11 + JCS found3 + ADDQ $0x04, DX + ADDQ $0x40, AX + JMP bigloop + +loop: + CMPQ DX, CX + JE done + VPCMPEQB (AX), X0, X2 + VPTEST X1, X2 + JCS done + INCQ DX + ADDQ $0x10, AX + JMP loop + JMP done + +found3: + INCQ DX + +found2: + INCQ DX + +found1: + INCQ DX + +done: + MOVQ DX, ret+48(FP) + RET + +not_found: + MOVQ CX, ret+48(FP) + RET + +safe_load: + MOVQ DX, SI + ANDQ $0x00000fff, SI + CMPQ SI, $0x00000ff0 + JBE load + MOVQ $0xfffffffffffffff0, SI + ADDQ BX, SI + VMOVUPS (DX)(SI*1), X0 + LEAQ shuffle_masks<>+16(SB), DX + SUBQ BX, DX + VMOVUPS (DX), X1 + VPSHUFB X1, X0, X0 + JMP prepare + +DATA blend_masks<>+0(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+8(SB)/8, $0xffffffffffffffff +DATA blend_masks<>+16(SB)/8, $0x0000000000000000 +DATA blend_masks<>+24(SB)/8, $0x0000000000000000 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $32 + +DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0f0e0d0c0b0a0908 +DATA shuffle_masks<>+16(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+24(SB)/8, $0x0f0e0d0c0b0a0908 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go new file mode 100644 index 00000000000..feafabef6f5 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.go @@ -0,0 +1,8 @@ +//go:build !purego +// +build !purego + +package keyset + +// Lookup searches for a key in a set of keys, returning its index if +// found. If the key cannot be found, the number of keys is returned. 
+func Lookup(keyset []byte, key []byte) int diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s new file mode 100644 index 00000000000..20acb992728 --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_arm64.s @@ -0,0 +1,143 @@ +//go:build !purego +// +build !purego + +#include "textflag.h" + +// func Lookup(keyset []byte, key []byte) int +TEXT ·Lookup(SB), NOSPLIT, $0-56 + MOVD keyset+0(FP), R0 + MOVD keyset_len+8(FP), R1 + MOVD key+24(FP), R2 + MOVD key_len+32(FP), R3 + MOVD key_cap+40(FP), R4 + + // None of the keys in the set are greater than 16 bytes, so if the input + // key is we can jump straight to not found. + CMP $16, R3 + BHI notfound + + // We'll be moving the keyset pointer (R0) forward as we compare keys, so + // make a copy of the starting point (R6). Also add the byte length (R1) to + // obtain a pointer to the end of the keyset (R5). + MOVD R0, R6 + ADD R0, R1, R5 + + // Prepare a 64-bit mask of all ones. + MOVD $-1, R7 + + // Prepare a vector of all zeroes. + VMOV ZR, V1.B16 + + // Check that it's safe to load 16 bytes of input. If cap(input)<16, jump + // to a check that determines whether a tail load is necessary (to avoid a + // page fault). + CMP $16, R4 + BLO safeload + +load: + // Load the input key (V0) and pad with zero bytes (V1). To blend the two + // vectors, we load a mask for the particular key length and then use TBL + // to select bytes from either V0 or V1. + VLD1 (R2), [V0.B16] + MOVD $blend_masks<>(SB), R10 + ADD R3<<4, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + +loop: + // Loop through each 16 byte key in the keyset. + CMP R0, R5 + BEQ notfound + + // Load and compare the next key. + VLD1.P 16(R0), [V4.B16] + VCMEQ V3.B16, V4.B16, V5.B16 + VMOV V5.D[0], R8 + VMOV V5.D[1], R9 + AND R8, R9, R9 + + // If the masks match, we found the key. + CMP R9, R7 + BEQ found + JMP loop + +found: + // If the key was found, take the position in the keyset and convert it + // to an index. The keyset pointer (R0) will be 1 key past the match, so + // subtract the starting pointer (R6), divide by 16 to convert from byte + // length to an index, and then subtract one. + SUB R6, R0, R0 + ADD R0>>4, ZR, R0 + SUB $1, R0, R0 + MOVD R0, ret+48(FP) + RET + +notfound: + // Return the number of keys in the keyset, which is the byte length (R1) + // divided by 16. + ADD R1>>4, ZR, R1 + MOVD R1, ret+48(FP) + RET + +safeload: + // Check if the input crosses a page boundary. If not, jump back. + AND $4095, R2, R12 + CMP $4080, R12 + BLS load + + // If it does cross a page boundary, we must assume that loading 16 bytes + // will cause a fault. Instead, we load the 16 bytes up to and including the + // key and then shuffle the key forward in the register. We can shuffle and + // pad with zeroes at the same time to avoid having to also blend (as load + // does). 
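+	//
+	// For example, with a 3-byte key the load starts 13 bytes before the key
+	// so it ends exactly at the key's last byte; the shuffle mask selected
+	// for length 3 then moves those 3 bytes to the front and fills the rest
+	// with index 0x10, which picks zero bytes from V1.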
+ MOVD $16, R12 + SUB R3, R12, R12 + SUB R12, R2, R2 + VLD1 (R2), [V0.B16] + MOVD $shuffle_masks<>(SB), R10 + ADD R12, R10, R10 + VLD1 (R10), [V2.B16] + VTBL V2.B16, [V0.B16, V1.B16], V3.B16 + JMP loop + +DATA blend_masks<>+0(SB)/8, $0x1010101010101010 +DATA blend_masks<>+8(SB)/8, $0x1010101010101010 +DATA blend_masks<>+16(SB)/8, $0x1010101010101000 +DATA blend_masks<>+24(SB)/8, $0x1010101010101010 +DATA blend_masks<>+32(SB)/8, $0x1010101010100100 +DATA blend_masks<>+40(SB)/8, $0x1010101010101010 +DATA blend_masks<>+48(SB)/8, $0x1010101010020100 +DATA blend_masks<>+56(SB)/8, $0x1010101010101010 +DATA blend_masks<>+64(SB)/8, $0x1010101003020100 +DATA blend_masks<>+72(SB)/8, $0x1010101010101010 +DATA blend_masks<>+80(SB)/8, $0x1010100403020100 +DATA blend_masks<>+88(SB)/8, $0x1010101010101010 +DATA blend_masks<>+96(SB)/8, $0x1010050403020100 +DATA blend_masks<>+104(SB)/8, $0x1010101010101010 +DATA blend_masks<>+112(SB)/8, $0x1006050403020100 +DATA blend_masks<>+120(SB)/8, $0x1010101010101010 +DATA blend_masks<>+128(SB)/8, $0x0706050403020100 +DATA blend_masks<>+136(SB)/8, $0x1010101010101010 +DATA blend_masks<>+144(SB)/8, $0x0706050403020100 +DATA blend_masks<>+152(SB)/8, $0x1010101010101008 +DATA blend_masks<>+160(SB)/8, $0x0706050403020100 +DATA blend_masks<>+168(SB)/8, $0x1010101010100908 +DATA blend_masks<>+176(SB)/8, $0x0706050403020100 +DATA blend_masks<>+184(SB)/8, $0x10101010100A0908 +DATA blend_masks<>+192(SB)/8, $0x0706050403020100 +DATA blend_masks<>+200(SB)/8, $0x101010100B0A0908 +DATA blend_masks<>+208(SB)/8, $0x0706050403020100 +DATA blend_masks<>+216(SB)/8, $0x1010100C0B0A0908 +DATA blend_masks<>+224(SB)/8, $0x0706050403020100 +DATA blend_masks<>+232(SB)/8, $0x10100D0C0B0A0908 +DATA blend_masks<>+240(SB)/8, $0x0706050403020100 +DATA blend_masks<>+248(SB)/8, $0x100E0D0C0B0A0908 +DATA blend_masks<>+256(SB)/8, $0x0706050403020100 +DATA blend_masks<>+264(SB)/8, $0x0F0E0D0C0B0A0908 +GLOBL blend_masks<>(SB), RODATA|NOPTR, $272 + +DATA shuffle_masks<>+0(SB)/8, $0x0706050403020100 +DATA shuffle_masks<>+8(SB)/8, $0x0F0E0D0C0B0A0908 +DATA shuffle_masks<>+16(SB)/8, $0x1010101010101010 +DATA shuffle_masks<>+24(SB)/8, $0x1010101010101010 +GLOBL shuffle_masks<>(SB), RODATA|NOPTR, $32 diff --git a/vendor/github.com/segmentio/asm/keyset/keyset_default.go b/vendor/github.com/segmentio/asm/keyset/keyset_default.go new file mode 100644 index 00000000000..1fa7d3fc84b --- /dev/null +++ b/vendor/github.com/segmentio/asm/keyset/keyset_default.go @@ -0,0 +1,19 @@ +//go:build purego || !(amd64 || arm64) +// +build purego !amd64,!arm64 + +package keyset + +func Lookup(keyset []byte, key []byte) int { + if len(key) > 16 { + return len(keyset) / 16 + } + var padded [16]byte + copy(padded[:], key) + + for i := 0; i < len(keyset); i += 16 { + if string(padded[:]) == string(keyset[i:i+16]) { + return i / 16 + } + } + return len(keyset) / 16 +} diff --git a/vendor/github.com/segmentio/encoding/LICENSE b/vendor/github.com/segmentio/encoding/LICENSE new file mode 100644 index 00000000000..1fbffdf72ad --- /dev/null +++ b/vendor/github.com/segmentio/encoding/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Segment.io, Inc. 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/segmentio/encoding/ascii/equal_fold.go b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go
new file mode 100644
index 00000000000..4207f171033
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/equal_fold.go
@@ -0,0 +1,40 @@
+//go:generate go run equal_fold_asm.go -out equal_fold_amd64.s -stubs equal_fold_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// EqualFold is a version of bytes.EqualFold designed to work on ASCII input
+// instead of UTF-8.
+//
+// When the program has guarantees that the input is composed of ASCII
+// characters only, it allows for greater optimizations.
+func EqualFold(a, b []byte) bool {
+	return ascii.EqualFold(a, b)
+}
+
+func HasPrefixFold(s, prefix []byte) bool {
+	return ascii.HasPrefixFold(s, prefix)
+}
+
+func HasSuffixFold(s, suffix []byte) bool {
+	return ascii.HasSuffixFold(s, suffix)
+}
+
+// EqualFoldString is a version of strings.EqualFold designed to work on ASCII
+// input instead of UTF-8.
+//
+// When the program has guarantees that the input is composed of ASCII
+// characters only, it allows for greater optimizations.
+func EqualFoldString(a, b string) bool {
+	return ascii.EqualFoldString(a, b)
+}
+
+func HasPrefixFoldString(s, prefix string) bool {
+	return ascii.HasPrefixFoldString(s, prefix)
+}
+
+func HasSuffixFoldString(s, suffix string) bool {
+	return ascii.HasSuffixFoldString(s, suffix)
+}
diff --git a/vendor/github.com/segmentio/encoding/ascii/valid.go b/vendor/github.com/segmentio/encoding/ascii/valid.go
new file mode 100644
index 00000000000..68b7c6ca2bb
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/valid.go
@@ -0,0 +1,26 @@
+//go:generate go run valid_asm.go -out valid_amd64.s -stubs valid_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// Valid returns true if b contains only ASCII characters.
+func Valid(b []byte) bool {
+	return ascii.Valid(b)
+}
+
+// ValidByte returns true if b is an ASCII character.
+func ValidByte(b byte) bool {
+	return ascii.ValidByte(b)
+}
+
+// ValidRune returns true if r is an ASCII character.
+func ValidRune(r rune) bool {
+	return ascii.ValidRune(r)
+}
+
+// ValidString returns true if s contains only ASCII characters.
+func ValidString(s string) bool {
+	return ascii.ValidString(s)
+}
diff --git a/vendor/github.com/segmentio/encoding/ascii/valid_print.go b/vendor/github.com/segmentio/encoding/ascii/valid_print.go
new file mode 100644
index 00000000000..241f58499a8
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/ascii/valid_print.go
@@ -0,0 +1,26 @@
+//go:generate go run valid_print_asm.go -out valid_print_amd64.s -stubs valid_print_amd64.go
+package ascii
+
+import (
+	"github.com/segmentio/asm/ascii"
+)
+
+// ValidPrint returns true if b contains only printable ASCII characters.
+func ValidPrint(b []byte) bool {
+	return ascii.ValidPrint(b)
+}
+
+// ValidPrintByte returns true if b is a printable ASCII character.
+func ValidPrintByte(b byte) bool {
+	return ascii.ValidPrintByte(b)
+}
+
+// ValidPrintRune returns true if r is a printable ASCII character.
+func ValidPrintRune(r rune) bool {
+	return ascii.ValidPrintRune(r)
+}
+
+// ValidPrintString returns true if s contains only printable ASCII characters.
+func ValidPrintString(s string) bool {
+	return ascii.ValidPrintString(s)
+}
diff --git a/vendor/github.com/segmentio/encoding/iso8601/parse.go b/vendor/github.com/segmentio/encoding/iso8601/parse.go
new file mode 100644
index 00000000000..6fbe5dc31db
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/iso8601/parse.go
@@ -0,0 +1,185 @@
+package iso8601
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+	"unsafe"
+)
+
+var (
+	errInvalidTimestamp = errors.New("invalid ISO8601 timestamp")
+	errMonthOutOfRange  = errors.New("month out of range")
+	errDayOutOfRange    = errors.New("day out of range")
+	errHourOutOfRange   = errors.New("hour out of range")
+	errMinuteOutOfRange = errors.New("minute out of range")
+	errSecondOutOfRange = errors.New("second out of range")
+)
+
+// Parse parses an ISO8601 timestamp, e.g. "2021-03-25T21:36:12Z".
+func Parse(input string) (time.Time, error) {
+	b := unsafeStringToBytes(input)
+	if len(b) >= 20 && len(b) <= 30 && b[len(b)-1] == 'Z' {
+		if len(b) == 21 || (len(b) > 21 && b[19] != '.') {
+			return time.Time{}, errInvalidTimestamp
+		}
+
+		t1 := binary.LittleEndian.Uint64(b)
+		t2 := binary.LittleEndian.Uint64(b[8:16])
+		t3 := uint64(b[16]) | uint64(b[17])<<8 | uint64(b[18])<<16 | uint64('Z')<<24
+
+		// Check for valid separators by masking input with " - - T : : Z".
+		// If separators are all valid, replace them with a '0' (0x30) byte and
+		// check all bytes are now numeric.
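+		//
+		// For "2021-03-25T21:36:12Z": t1 holds "2021-03-" (mask1 checks the
+		// two '-' bytes), t2 holds "25T21:36" (mask2 checks 'T' and ':'),
+		// and t3 holds ":12Z" (mask3 checks ':' and the trailing 'Z').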
+ if !match(t1, mask1) || !match(t2, mask2) || !match(t3, mask3) { + return time.Time{}, errInvalidTimestamp + } + t1 ^= replace1 + t2 ^= replace2 + t3 ^= replace3 + if (nonNumeric(t1) | nonNumeric(t2) | nonNumeric(t3)) != 0 { + return time.Time{}, errInvalidTimestamp + } + + t1 -= zero + t2 -= zero + t3 -= zero + year := (t1&0xF)*1000 + (t1>>8&0xF)*100 + (t1>>16&0xF)*10 + (t1 >> 24 & 0xF) + month := (t1>>40&0xF)*10 + (t1 >> 48 & 0xF) + day := (t2&0xF)*10 + (t2 >> 8 & 0xF) + hour := (t2>>24&0xF)*10 + (t2 >> 32 & 0xF) + minute := (t2>>48&0xF)*10 + (t2 >> 56) + second := (t3>>8&0xF)*10 + (t3 >> 16) + + nanos := int64(0) + if len(b) > 20 { + for _, c := range b[20 : len(b)-1] { + if c < '0' || c > '9' { + return time.Time{}, errInvalidTimestamp + } + nanos = (nanos * 10) + int64(c-'0') + } + nanos *= pow10[30-len(b)] + } + + if err := validate(year, month, day, hour, minute, second); err != nil { + return time.Time{}, err + } + + unixSeconds := int64(daysSinceEpoch(year, month, day))*86400 + int64(hour*3600+minute*60+second) + return time.Unix(unixSeconds, nanos).UTC(), nil + } + + // Fallback to using time.Parse(). + t, err := time.Parse(time.RFC3339Nano, input) + if err != nil { + // Override (and don't wrap) the error here. The error returned by + // time.Parse() is dynamic, and includes a reference to the input + // string. By overriding the error, we guarantee that the input string + // doesn't escape. + return time.Time{}, errInvalidTimestamp + } + return t, nil +} + +var pow10 = []int64{1, 10, 100, 1000, 1e4, 1e5, 1e6, 1e7, 1e8} + +const ( + mask1 = 0x2d00002d00000000 // YYYY-MM- + mask2 = 0x00003a0000540000 // DDTHH:MM + mask3 = 0x000000005a00003a // :SSZ____ + + // Generate masks that replace the separators with a numeric byte. + // The input must have valid separators. XOR with the separator bytes + // to zero them out and then XOR with 0x30 to replace them with '0'. + replace1 = mask1 ^ 0x3000003000000000 + replace2 = mask2 ^ 0x0000300000300000 + replace3 = mask3 ^ 0x3030303030000030 + + lsb = ^uint64(0) / 255 + msb = lsb * 0x80 + + zero = lsb * '0' + nine = lsb * '9' +) + +func validate(year, month, day, hour, minute, second uint64) error { + if day == 0 || day > 31 { + return errDayOutOfRange + } + if month == 0 || month > 12 { + return errMonthOutOfRange + } + if hour >= 24 { + return errHourOutOfRange + } + if minute >= 60 { + return errMinuteOutOfRange + } + if second >= 60 { + return errSecondOutOfRange + } + if month == 2 && (day > 29 || (day == 29 && !isLeapYear(year))) { + return errDayOutOfRange + } + if day == 31 { + switch month { + case 4, 6, 9, 11: + return errDayOutOfRange + } + } + return nil +} + +func match(u, mask uint64) bool { + return (u & mask) == mask +} + +func nonNumeric(u uint64) uint64 { + // Derived from https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord. + // Subtract '0' (0x30) from each byte so that the MSB is set in each byte + // if there's a byte less than '0' (0x30). Add 0x46 (0x7F-'9') so that the + // MSB is set if there's a byte greater than '9' (0x39). To handle overflow + // when adding 0x46, include the MSB from the input bytes in the final mask. + // Remove all but the MSBs and then you're left with a mask where each + // non-numeric byte from the input has its MSB set in the output. + return ((u - zero) | (u + (^msb - nine)) | u) & msb +} + +func daysSinceEpoch(year, month, day uint64) uint64 { + // Derived from https://blog.reverberate.org/2020/05/12/optimizing-date-algorithms.html. 
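+	//
+	// The year is shifted to start in March so the leap day falls at the end
+	// of the shifted year, monthDays approximates the cumulative day count of
+	// the shifted months, and the trailing constant rebases the result so
+	// that 1970-01-01 maps to day zero.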
+	monthAdjusted := month - 3
+	var carry uint64
+	if monthAdjusted > month {
+		carry = 1
+	}
+	var adjust uint64
+	if carry == 1 {
+		adjust = 12
+	}
+	yearAdjusted := year + 4800 - carry
+	monthDays := ((monthAdjusted+adjust)*62719 + 769) / 2048
+	leapDays := yearAdjusted/4 - yearAdjusted/100 + yearAdjusted/400
+	return yearAdjusted*365 + leapDays + monthDays + (day - 1) - 2472632
+}
+
+func isLeapYear(y uint64) bool {
+	return (y%4) == 0 && ((y%100) != 0 || (y%400) == 0)
+}
+
+func unsafeStringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(&sliceHeader{
+		Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
+		Len:  len(s),
+		Cap:  len(s),
+	}))
+}
+
+// sliceHeader is like reflect.SliceHeader but the Data field is a
+// unsafe.Pointer instead of being a uintptr to avoid invalid
+// conversions from uintptr to unsafe.Pointer.
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
diff --git a/vendor/github.com/segmentio/encoding/iso8601/valid.go b/vendor/github.com/segmentio/encoding/iso8601/valid.go
new file mode 100644
index 00000000000..2c4edec77bb
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/iso8601/valid.go
@@ -0,0 +1,179 @@
+package iso8601
+
+// ValidFlags is a bitset type used to configure the behavior of the Valid
+// function.
+type ValidFlags int
+
+const (
+	// Strict is a validation flag used to represent a strict iso8601 validation
+	// (this is the default).
+	Strict ValidFlags = 0
+
+	// AllowSpaceSeparator allows the presence of a space instead of a 'T' as
+	// separator between the date and time.
+	AllowSpaceSeparator ValidFlags = 1 << iota
+
+	// AllowMissingTime allows the value to contain only a date.
+	AllowMissingTime
+
+	// AllowMissingSubsecond allows the value to contain only a date and time.
+	AllowMissingSubsecond
+
+	// AllowMissingTimezone allows the value to be missing the timezone
+	// information.
+	AllowMissingTimezone
+
+	// AllowNumericTimezone allows the value to represent timezones in their
+	// numeric form.
+	AllowNumericTimezone
+
+	// Flexible is a combination of all validation flags that allow for
+	// non-strict checking of the input value.
+	Flexible = AllowSpaceSeparator | AllowMissingTime | AllowMissingSubsecond | AllowMissingTimezone | AllowNumericTimezone
+)
+
+// Valid checks value to verify whether or not it is a valid iso8601 time
+// representation.
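+//
+// A few illustrative calls (a sketch; note that Strict requires the
+// subsecond part):
+//
+//	Valid("2021-03-25T21:36:12.123Z", Strict)  // true
+//	Valid("2021-03-25", AllowMissingTime)      // true
+//	Valid("2021-03-25 21:36:12.0Z", Strict)    // false (space separator)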
+func Valid(value string, flags ValidFlags) bool { + var ok bool + + // year + if value, ok = readDigits(value, 4, 4); !ok { + return false + } + + if value, ok = readByte(value, '-'); !ok { + return false + } + + // month + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, '-'); !ok { + return false + } + + // day + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if len(value) == 0 && (flags&AllowMissingTime) != 0 { + return true // date only + } + + // separator + if value, ok = readByte(value, 'T'); !ok { + if (flags & AllowSpaceSeparator) == 0 { + return false + } + if value, ok = readByte(value, ' '); !ok { + return false + } + } + + // hour + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + return false + } + + // minute + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + return false + } + + // second + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + // microsecond + if value, ok = readByte(value, '.'); !ok { + if (flags & AllowMissingSubsecond) == 0 { + return false + } + } else { + if value, ok = readDigits(value, 1, 9); !ok { + return false + } + } + + if len(value) == 0 && (flags&AllowMissingTimezone) != 0 { + return true // date and time + } + + // timezone + if value, ok = readByte(value, 'Z'); ok { + return len(value) == 0 + } + + if (flags & AllowSpaceSeparator) != 0 { + value, _ = readByte(value, ' ') + } + + if value, ok = readByte(value, '+'); !ok { + if value, ok = readByte(value, '-'); !ok { + return false + } + } + + // timezone hour + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + if value, ok = readByte(value, ':'); !ok { + if (flags & AllowNumericTimezone) == 0 { + return false + } + } + + // timezone minute + if value, ok = readDigits(value, 2, 2); !ok { + return false + } + + return len(value) == 0 +} + +func readDigits(value string, min, max int) (string, bool) { + if len(value) < min { + return value, false + } + + i := 0 + + for i < max && i < len(value) && isDigit(value[i]) { + i++ + } + + if i < max && i < min { + return value, false + } + + return value[i:], true +} + +func readByte(value string, c byte) (string, bool) { + if len(value) == 0 { + return value, false + } + if value[0] != c { + return value, false + } + return value[1:], true +} + +func isDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/github.com/segmentio/encoding/json/README.md b/vendor/github.com/segmentio/encoding/json/README.md new file mode 100644 index 00000000000..c5ed94b73dc --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/README.md @@ -0,0 +1,76 @@ +# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json) + +Go package offering a replacement implementation of the standard library's +[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much +better performance. 
+
+## Usage
+
+The exported API of this package mirrors the standard library's
+[`encoding/json`](https://golang.org/pkg/encoding/json/) package; the only
+change needed to take advantage of the performance improvements is the import
+path of the `json` package, from:
+```go
+import (
+	"encoding/json"
+)
+```
+to
+```go
+import (
+	"github.com/segmentio/encoding/json"
+)
+```
+
+One way to gain higher encoding throughput is to disable HTML escaping.
+It allows the string encoding to use a much more efficient code path which
+does not require parsing UTF-8 runes most of the time.
+
+## Performance Improvements
+
+The internal implementation uses a fair amount of unsafe operations (untyped
+code, pointer arithmetic, etc...) to avoid using reflection as much as possible,
+which is often the reason why serialization code has a large CPU and memory
+footprint.
+
+The package aims for zero unnecessary dynamic memory allocations and hot code
+paths that are mostly free from calls into the reflect package.
+
+## Compatibility with encoding/json
+
+This package aims to be a drop-in replacement, therefore it is tested to behave
+exactly like the standard library's package. However, there are still a few
+missing features that have not been ported yet:
+
+- Streaming decoder: currently the `Decoder` implementation offered by the
+package does not support progressively reading values from a JSON array (unlike
+the standard library). In our experience this is a very rare use case; if you
+need it you're better off sticking to the standard library, or spending a bit of
+time implementing it here ;)
+
+Note that none of those features should result in performance degradations if
+they were implemented in the package, and we welcome contributions!
+
+## Trade-offs
+
+As one would expect, we had to make a couple of trade-offs to achieve greater
+performance than the standard library, but there were also features that we
+did not want to give away.
+
+Other open-source packages offering a reduced CPU and memory footprint usually
+do so by designing a different API, or require code generation (therefore adding
+complexity to the build process). These were not acceptable conditions for us,
+as we were not willing to trade off developer productivity for better runtime
+performance. To achieve this, we chose to exactly replicate the standard
+library interfaces and behavior, which meant the package implementation was the
+only area that we were able to work with. The internals of this package make
+heavy use of unsafe pointer arithmetic and other performance optimizations,
+and therefore are not as approachable as typical Go programs. Basically, we put
+a bigger burden on maintainers to achieve better runtime cost without
+sacrificing developer productivity.
+
+For these reasons, we also don't believe that this code should be ported upstream
+to the standard `encoding/json` package. The standard library has to remain
+readable and approachable to maximize stability and maintainability, and make
+projects like this one possible because a high quality reference implementation
+already exists.
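The HTML-escaping note in the README above is the main tuning knob most callers touch. A minimal sketch of what that looks like, assuming the drop-in `Encoder` API the README describes (`SetEscapeHTML` mirrors the standard library's method of the same name):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/segmentio/encoding/json"
)

func main() {
	var buf bytes.Buffer
	enc := json.NewEncoder(&buf)
	// Skipping HTML escaping keeps '<', '>' and '&' literal and lets the
	// encoder use the faster string path described in the README.
	enc.SetEscapeHTML(false)

	if err := enc.Encode(map[string]string{"msg": "a < b && c > d"}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"msg":"a < b && c > d"}
}
```

With escaping left enabled (the default), the same program would emit `\u003c`-style sequences instead of the literal angle brackets.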
diff --git a/vendor/github.com/segmentio/encoding/json/codec.go b/vendor/github.com/segmentio/encoding/json/codec.go
new file mode 100644
index 00000000000..908c3f6dc3a
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/codec.go
@@ -0,0 +1,1232 @@
+package json
+
+import (
+	"encoding"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+	"unicode"
+	"unsafe"
+
+	"github.com/segmentio/asm/keyset"
+)
+
+const (
+	// 1000 is the value used by the standard encoding/json package.
+	//
+	// https://cs.opensource.google/go/go/+/refs/tags/go1.17.3:src/encoding/json/encode.go;drc=refs%2Ftags%2Fgo1.17.3;l=300
+	startDetectingCyclesAfter = 1000
+)
+
+type codec struct {
+	encode encodeFunc
+	decode decodeFunc
+}
+
+type encoder struct {
+	flags AppendFlags
+	// ptrDepth tracks the depth of pointer cycles; when it reaches the value
+	// of startDetectingCyclesAfter, the ptrSeen map is allocated and the
+	// encoder starts tracking pointers it has seen as an attempt to detect
+	// whether it has entered a pointer cycle and needs to error before the
+	// goroutine runs out of stack space.
+	ptrDepth uint32
+	ptrSeen  map[unsafe.Pointer]struct{}
+}
+
+type decoder struct {
+	flags ParseFlags
+}
+
+type encodeFunc func(encoder, []byte, unsafe.Pointer) ([]byte, error)
+type decodeFunc func(decoder, []byte, unsafe.Pointer) ([]byte, error)
+
+type emptyFunc func(unsafe.Pointer) bool
+type sortFunc func([]reflect.Value)
+
+var (
+	// Eventually consistent cache mapping Go types to dynamically generated
+	// codecs.
+	//
+	// Note: using a uintptr as key instead of reflect.Type shaved ~15ns off of
+	// the ~30ns Marshal/Unmarshal functions which were dominated by the map
+	// lookup time for simple types like bool, int, etc..
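+	//
+	// Writers copy the whole map and publish the new pointer atomically (see
+	// cacheStore below), so readers can load it without taking a lock.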
+ cache unsafe.Pointer // map[unsafe.Pointer]codec +) + +func cacheLoad() map[unsafe.Pointer]codec { + p := atomic.LoadPointer(&cache) + return *(*map[unsafe.Pointer]codec)(unsafe.Pointer(&p)) +} + +func cacheStore(typ reflect.Type, cod codec, oldCodecs map[unsafe.Pointer]codec) { + newCodecs := make(map[unsafe.Pointer]codec, len(oldCodecs)+1) + newCodecs[typeid(typ)] = cod + + for t, c := range oldCodecs { + newCodecs[t] = c + } + + atomic.StorePointer(&cache, *(*unsafe.Pointer)(unsafe.Pointer(&newCodecs))) +} + +func typeid(t reflect.Type) unsafe.Pointer { + return (*iface)(unsafe.Pointer(&t)).ptr +} + +func constructCachedCodec(t reflect.Type, cache map[unsafe.Pointer]codec) codec { + c := constructCodec(t, map[reflect.Type]*structType{}, t.Kind() == reflect.Ptr) + + if inlined(t) { + c.encode = constructInlineValueEncodeFunc(c.encode) + } + + cacheStore(t, c, cache) + return c +} + +func constructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) (c codec) { + switch t { + case nullType, nil: + c = codec{encode: encoder.encodeNull, decode: decoder.decodeNull} + + case numberType: + c = codec{encode: encoder.encodeNumber, decode: decoder.decodeNumber} + + case bytesType: + c = codec{encode: encoder.encodeBytes, decode: decoder.decodeBytes} + + case durationType: + c = codec{encode: encoder.encodeDuration, decode: decoder.decodeDuration} + + case timeType: + c = codec{encode: encoder.encodeTime, decode: decoder.decodeTime} + + case interfaceType: + c = codec{encode: encoder.encodeInterface, decode: decoder.decodeInterface} + + case rawMessageType: + c = codec{encode: encoder.encodeRawMessage, decode: decoder.decodeRawMessage} + + case numberPtrType: + c = constructPointerCodec(numberPtrType, nil) + + case durationPtrType: + c = constructPointerCodec(durationPtrType, nil) + + case timePtrType: + c = constructPointerCodec(timePtrType, nil) + + case rawMessagePtrType: + c = constructPointerCodec(rawMessagePtrType, nil) + } + + if c.encode != nil { + return + } + + switch t.Kind() { + case reflect.Bool: + c = codec{encode: encoder.encodeBool, decode: decoder.decodeBool} + + case reflect.Int: + c = codec{encode: encoder.encodeInt, decode: decoder.decodeInt} + + case reflect.Int8: + c = codec{encode: encoder.encodeInt8, decode: decoder.decodeInt8} + + case reflect.Int16: + c = codec{encode: encoder.encodeInt16, decode: decoder.decodeInt16} + + case reflect.Int32: + c = codec{encode: encoder.encodeInt32, decode: decoder.decodeInt32} + + case reflect.Int64: + c = codec{encode: encoder.encodeInt64, decode: decoder.decodeInt64} + + case reflect.Uint: + c = codec{encode: encoder.encodeUint, decode: decoder.decodeUint} + + case reflect.Uintptr: + c = codec{encode: encoder.encodeUintptr, decode: decoder.decodeUintptr} + + case reflect.Uint8: + c = codec{encode: encoder.encodeUint8, decode: decoder.decodeUint8} + + case reflect.Uint16: + c = codec{encode: encoder.encodeUint16, decode: decoder.decodeUint16} + + case reflect.Uint32: + c = codec{encode: encoder.encodeUint32, decode: decoder.decodeUint32} + + case reflect.Uint64: + c = codec{encode: encoder.encodeUint64, decode: decoder.decodeUint64} + + case reflect.Float32: + c = codec{encode: encoder.encodeFloat32, decode: decoder.decodeFloat32} + + case reflect.Float64: + c = codec{encode: encoder.encodeFloat64, decode: decoder.decodeFloat64} + + case reflect.String: + c = codec{encode: encoder.encodeString, decode: decoder.decodeString} + + case reflect.Interface: + c = constructInterfaceCodec(t) + + case reflect.Array: + c = 
constructArrayCodec(t, seen, canAddr) + + case reflect.Slice: + c = constructSliceCodec(t, seen) + + case reflect.Map: + c = constructMapCodec(t, seen) + + case reflect.Struct: + c = constructStructCodec(t, seen, canAddr) + + case reflect.Ptr: + c = constructPointerCodec(t, seen) + + default: + c = constructUnsupportedTypeCodec(t) + } + + p := reflect.PtrTo(t) + + if canAddr { + switch { + case p.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(t, true) + case p.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(t, true) + } + } + + switch { + case t.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(t, false) + case t.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(t, false) + } + + switch { + case p.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(t, true) + case p.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(t, true) + } + + return +} + +func constructStringCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { + c := constructCodec(t, seen, canAddr) + return codec{ + encode: constructStringEncodeFunc(c.encode), + decode: constructStringDecodeFunc(c.decode), + } +} + +func constructStringEncodeFunc(encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeToString(b, p, encode) + } +} + +func constructStringDecodeFunc(decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeFromString(b, p, decode) + } +} + +func constructStringToIntDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeFromStringToInt(b, p, t, decode) + } +} + +func constructArrayCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { + e := t.Elem() + c := constructCodec(e, seen, canAddr) + s := alignedSize(e) + return codec{ + encode: constructArrayEncodeFunc(s, t, c.encode), + decode: constructArrayDecodeFunc(s, t, c.decode), + } +} + +func constructArrayEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { + n := t.Len() + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeArray(b, p, n, size, t, encode) + } +} + +func constructArrayDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { + n := t.Len() + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeArray(b, p, n, size, t, decode) + } +} + +func constructSliceCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + e := t.Elem() + s := alignedSize(e) + + if e.Kind() == reflect.Uint8 { + // Go 1.7+ behavior: slices of byte types (and aliases) may override the + // default encoding and decoding behaviors by implementing marshaler and + // unmarshaler interfaces. 
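+		//
+		// Illustrative sketch (not part of this package): given
+		//
+		//	type Bit byte
+		//
+		//	func (b Bit) MarshalJSON() ([]byte, error) {
+		//		return []byte(strconv.Itoa(int(b))), nil
+		//	}
+		//
+		// a []Bit value is encoded element by element through MarshalJSON
+		// below instead of as a base64 string.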
+ p := reflect.PtrTo(e) + c := codec{} + + switch { + case e.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(e, false) + case e.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(e, false) + case p.Implements(jsonMarshalerType): + c.encode = constructJSONMarshalerEncodeFunc(e, true) + case p.Implements(textMarshalerType): + c.encode = constructTextMarshalerEncodeFunc(e, true) + } + + switch { + case e.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(e, false) + case e.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(e, false) + case p.Implements(jsonUnmarshalerType): + c.decode = constructJSONUnmarshalerDecodeFunc(e, true) + case p.Implements(textUnmarshalerType): + c.decode = constructTextUnmarshalerDecodeFunc(e, true) + } + + if c.encode != nil { + c.encode = constructSliceEncodeFunc(s, t, c.encode) + } else { + c.encode = encoder.encodeBytes + } + + if c.decode != nil { + c.decode = constructSliceDecodeFunc(s, t, c.decode) + } else { + c.decode = decoder.decodeBytes + } + + return c + } + + c := constructCodec(e, seen, true) + return codec{ + encode: constructSliceEncodeFunc(s, t, c.encode), + decode: constructSliceDecodeFunc(s, t, c.decode), + } +} + +func constructSliceEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeSlice(b, p, size, t, encode) + } +} + +func constructSliceDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeSlice(b, p, size, t, decode) + } +} + +func constructMapCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + var sortKeys sortFunc + k := t.Key() + v := t.Elem() + + // Faster implementations for some common cases. + switch { + case k == stringType && v == interfaceType: + return codec{ + encode: encoder.encodeMapStringInterface, + decode: decoder.decodeMapStringInterface, + } + + case k == stringType && v == rawMessageType: + return codec{ + encode: encoder.encodeMapStringRawMessage, + decode: decoder.decodeMapStringRawMessage, + } + + case k == stringType && v == stringType: + return codec{ + encode: encoder.encodeMapStringString, + decode: decoder.decodeMapStringString, + } + + case k == stringType && v == stringsType: + return codec{ + encode: encoder.encodeMapStringStringSlice, + decode: decoder.decodeMapStringStringSlice, + } + + case k == stringType && v == boolType: + return codec{ + encode: encoder.encodeMapStringBool, + decode: decoder.decodeMapStringBool, + } + } + + kc := codec{} + vc := constructCodec(v, seen, false) + + if k.Implements(textMarshalerType) || reflect.PtrTo(k).Implements(textUnmarshalerType) { + kc.encode = constructTextMarshalerEncodeFunc(k, false) + kc.decode = constructTextUnmarshalerDecodeFunc(k, true) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { + // This is a performance abomination but the use case is rare + // enough that it shouldn't be a problem in practice. 
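+				//
+				// Each comparison re-marshals both keys, so sorting n keys
+				// performs O(n log n) MarshalText calls.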
+ k1, _ := keys[i].Interface().(encoding.TextMarshaler).MarshalText() + k2, _ := keys[j].Interface().(encoding.TextMarshaler).MarshalText() + return string(k1) < string(k2) + }) + } + } else { + switch k.Kind() { + case reflect.String: + kc.encode = encoder.encodeString + kc.decode = decoder.decodeString + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() }) + } + + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + kc = constructStringCodec(k, seen, false) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return intStringsAreSorted(keys[i].Int(), keys[j].Int()) }) + } + + case reflect.Uint, + reflect.Uintptr, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + kc = constructStringCodec(k, seen, false) + + sortKeys = func(keys []reflect.Value) { + sort.Slice(keys, func(i, j int) bool { return uintStringsAreSorted(keys[i].Uint(), keys[j].Uint()) }) + } + + default: + return constructUnsupportedTypeCodec(t) + } + } + + if inlined(v) { + vc.encode = constructInlineValueEncodeFunc(vc.encode) + } + + return codec{ + encode: constructMapEncodeFunc(t, kc.encode, vc.encode, sortKeys), + decode: constructMapDecodeFunc(t, kc.decode, vc.decode), + } +} + +func constructMapEncodeFunc(t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeMap(b, p, t, encodeKey, encodeValue, sortKeys) + } +} + +func constructMapDecodeFunc(t reflect.Type, decodeKey, decodeValue decodeFunc) decodeFunc { + kt := t.Key() + vt := t.Elem() + kz := reflect.Zero(kt) + vz := reflect.Zero(vt) + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeMap(b, p, t, kt, vt, kz, vz, decodeKey, decodeValue) + } +} + +func constructStructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { + st := constructStructType(t, seen, canAddr) + return codec{ + encode: constructStructEncodeFunc(st), + decode: constructStructDecodeFunc(st), + } +} + +func constructStructType(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) *structType { + // Used for preventing infinite recursion on types that have pointers to + // themselves. + st := seen[t] + + if st == nil { + st = &structType{ + fields: make([]structField, 0, t.NumField()), + fieldsIndex: make(map[string]*structField), + ficaseIndex: make(map[string]*structField), + typ: t, + } + + seen[t] = st + st.fields = appendStructFields(st.fields, t, 0, seen, canAddr) + + for i := range st.fields { + f := &st.fields[i] + s := strings.ToLower(f.name) + st.fieldsIndex[f.name] = f + // When there is ambiguity because multiple fields have the same + // case-insensitive representation, the first field must win. + if _, exists := st.ficaseIndex[s]; !exists { + st.ficaseIndex[s] = f + } + } + + // At a certain point the linear scan provided by keyset is less + // efficient than a map. The 32 was chosen based on benchmarks in the + // segmentio/asm repo run with an Intel Kaby Lake processor and go1.17. 
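+		//
+		// For larger structs st.keyset stays empty and field lookups during
+		// decoding fall back to the fieldsIndex map built above.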
+ if len(st.fields) <= 32 { + keys := make([][]byte, len(st.fields)) + for i, f := range st.fields { + keys[i] = []byte(f.name) + } + st.keyset = keyset.New(keys) + } + } + + return st +} + +func constructStructEncodeFunc(st *structType) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeStruct(b, p, st) + } +} + +func constructStructDecodeFunc(st *structType) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeStruct(b, p, st) + } +} + +func constructEmbeddedStructPointerCodec(t reflect.Type, unexported bool, offset uintptr, field codec) codec { + return codec{ + encode: constructEmbeddedStructPointerEncodeFunc(t, unexported, offset, field.encode), + decode: constructEmbeddedStructPointerDecodeFunc(t, unexported, offset, field.decode), + } +} + +func constructEmbeddedStructPointerEncodeFunc(t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeEmbeddedStructPointer(b, p, t, unexported, offset, encode) + } +} + +func constructEmbeddedStructPointerDecodeFunc(t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeEmbeddedStructPointer(b, p, t, unexported, offset, decode) + } +} + +func appendStructFields(fields []structField, t reflect.Type, offset uintptr, seen map[reflect.Type]*structType, canAddr bool) []structField { + type embeddedField struct { + index int + offset uintptr + pointer bool + unexported bool + subtype *structType + subfield *structField + } + + names := make(map[string]struct{}) + embedded := make([]embeddedField, 0, 10) + + for i, n := 0, t.NumField(); i < n; i++ { + f := t.Field(i) + + var ( + name = f.Name + anonymous = f.Anonymous + tag = false + omitempty = false + stringify = false + unexported = len(f.PkgPath) != 0 + ) + + if unexported && !anonymous { // unexported + continue + } + + if parts := strings.Split(f.Tag.Get("json"), ","); len(parts) != 0 { + if len(parts[0]) != 0 { + name, tag = parts[0], true + } + + if name == "-" && len(parts) == 1 { // ignored + continue + } + + if !isValidTag(name) { + name = f.Name + } + + for _, tag := range parts[1:] { + switch tag { + case "omitempty": + omitempty = true + case "string": + stringify = true + } + } + } + + if anonymous && !tag { // embedded + typ := f.Type + ptr := f.Type.Kind() == reflect.Ptr + + if ptr { + typ = f.Type.Elem() + } + + if typ.Kind() == reflect.Struct { + // When the embedded fields is inlined the fields can be looked + // up by offset from the address of the wrapping object, so we + // simply add the embedded struct fields to the list of fields + // of the current struct type. + subtype := constructStructType(typ, seen, canAddr) + + for j := range subtype.fields { + embedded = append(embedded, embeddedField{ + index: i<<32 | j, + offset: offset + f.Offset, + pointer: ptr, + unexported: unexported, + subtype: subtype, + subfield: &subtype.fields[j], + }) + } + + continue + } + + if unexported { // ignore unexported non-struct types + continue + } + } + + codec := constructCodec(f.Type, seen, canAddr) + + if stringify { + // https://golang.org/pkg/encoding/json/#Marshal + // + // The "string" option signals that a field is stored as JSON inside + // a JSON-encoded string. It applies only to fields of string, + // floating point, integer, or boolean types. 
This extra level of + // encoding is sometimes used when communicating with JavaScript + // programs: + typ := f.Type + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + switch typ.Kind() { + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uintptr, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + codec.encode = constructStringEncodeFunc(codec.encode) + codec.decode = constructStringToIntDecodeFunc(typ, codec.decode) + case reflect.Bool, + reflect.Float32, + reflect.Float64, + reflect.String: + codec.encode = constructStringEncodeFunc(codec.encode) + codec.decode = constructStringDecodeFunc(codec.decode) + } + } + + fields = append(fields, structField{ + codec: codec, + offset: offset + f.Offset, + empty: emptyFuncOf(f.Type), + tag: tag, + omitempty: omitempty, + name: name, + index: i << 32, + typ: f.Type, + zero: reflect.Zero(f.Type), + }) + + names[name] = struct{}{} + } + + // Only unambiguous embedded fields must be serialized. + ambiguousNames := make(map[string]int) + ambiguousTags := make(map[string]int) + + // Embedded types can never override a field that was already present at + // the top-level. + for name := range names { + ambiguousNames[name]++ + ambiguousTags[name]++ + } + + for _, embfield := range embedded { + ambiguousNames[embfield.subfield.name]++ + if embfield.subfield.tag { + ambiguousTags[embfield.subfield.name]++ + } + } + + for _, embfield := range embedded { + subfield := *embfield.subfield + + if ambiguousNames[subfield.name] > 1 && !(subfield.tag && ambiguousTags[subfield.name] == 1) { + continue // ambiguous embedded field + } + + if embfield.pointer { + subfield.codec = constructEmbeddedStructPointerCodec(embfield.subtype.typ, embfield.unexported, subfield.offset, subfield.codec) + subfield.offset = embfield.offset + } else { + subfield.offset += embfield.offset + } + + // To prevent dominant flags more than one level below the embedded one. + subfield.tag = false + + // To ensure the order of the fields in the output is the same is in the + // struct type. 
+ subfield.index = embfield.index + + fields = append(fields, subfield) + } + + for i := range fields { + name := fields[i].name + fields[i].json = encodeKeyFragment(name, 0) + fields[i].html = encodeKeyFragment(name, EscapeHTML) + } + + sort.Slice(fields, func(i, j int) bool { return fields[i].index < fields[j].index }) + return fields +} + +func encodeKeyFragment(s string, flags AppendFlags) string { + b := make([]byte, 1, len(s)+4) + b[0] = ',' + e := encoder{flags: flags} + b, _ = e.encodeString(b, unsafe.Pointer(&s)) + b = append(b, ':') + return *(*string)(unsafe.Pointer(&b)) +} + +func constructPointerCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { + e := t.Elem() + c := constructCodec(e, seen, true) + return codec{ + encode: constructPointerEncodeFunc(e, c.encode), + decode: constructPointerDecodeFunc(e, c.decode), + } +} + +func constructPointerEncodeFunc(t reflect.Type, encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodePointer(b, p, t, encode) + } +} + +func constructPointerDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodePointer(b, p, t, decode) + } +} + +func constructInterfaceCodec(t reflect.Type) codec { + return codec{ + encode: constructMaybeEmptyInterfaceEncoderFunc(t), + decode: constructMaybeEmptyInterfaceDecoderFunc(t), + } +} + +func constructMaybeEmptyInterfaceEncoderFunc(t reflect.Type) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeMaybeEmptyInterface(b, p, t) + } +} + +func constructMaybeEmptyInterfaceDecoderFunc(t reflect.Type) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeMaybeEmptyInterface(b, p, t) + } +} + +func constructUnsupportedTypeCodec(t reflect.Type) codec { + return codec{ + encode: constructUnsupportedTypeEncodeFunc(t), + decode: constructUnsupportedTypeDecodeFunc(t), + } +} + +func constructUnsupportedTypeEncodeFunc(t reflect.Type) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeUnsupportedTypeError(b, p, t) + } +} + +func constructUnsupportedTypeDecodeFunc(t reflect.Type) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeUnmarshalTypeError(b, p, t) + } +} + +func constructJSONMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeJSONMarshaler(b, p, t, pointer) + } +} + +func constructJSONUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeJSONUnmarshaler(b, p, t, pointer) + } +} + +func constructTextMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeTextMarshaler(b, p, t, pointer) + } +} + +func constructTextUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { + return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return d.decodeTextUnmarshaler(b, p, t, pointer) + } +} + +func constructInlineValueEncodeFunc(encode encodeFunc) encodeFunc { + return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { + return encode(e, b, noescape(unsafe.Pointer(&p))) + } +} + +// noescape hides a pointer from escape analysis. 
noescape is +// the identity function but escape analysis doesn't think the +// output depends on the input. noescape is inlined and currently +// compiles down to zero instructions. +// USE CAREFULLY! +// This was copied from the runtime; see issues 23382 and 7921. +//go:nosplit +func noescape(p unsafe.Pointer) unsafe.Pointer { + x := uintptr(p) + return unsafe.Pointer(x ^ 0) +} + +func alignedSize(t reflect.Type) uintptr { + a := t.Align() + s := t.Size() + return align(uintptr(a), uintptr(s)) +} + +func align(align, size uintptr) uintptr { + if align != 0 && (size%align) != 0 { + size = ((size / align) + 1) * align + } + return size +} + +func inlined(t reflect.Type) bool { + switch t.Kind() { + case reflect.Ptr: + return true + case reflect.Map: + return true + case reflect.Struct: + return t.NumField() == 1 && inlined(t.Field(0).Type) + default: + return false + } +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +func emptyFuncOf(t reflect.Type) emptyFunc { + switch t { + case bytesType, rawMessageType: + return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } + } + + switch t.Kind() { + case reflect.Array: + if t.Len() == 0 { + return func(unsafe.Pointer) bool { return true } + } + + case reflect.Map: + return func(p unsafe.Pointer) bool { return reflect.NewAt(t, p).Elem().Len() == 0 } + + case reflect.Slice: + return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } + + case reflect.String: + return func(p unsafe.Pointer) bool { return len(*(*string)(p)) == 0 } + + case reflect.Bool: + return func(p unsafe.Pointer) bool { return !*(*bool)(p) } + + case reflect.Int, reflect.Uint: + return func(p unsafe.Pointer) bool { return *(*uint)(p) == 0 } + + case reflect.Uintptr: + return func(p unsafe.Pointer) bool { return *(*uintptr)(p) == 0 } + + case reflect.Int8, reflect.Uint8: + return func(p unsafe.Pointer) bool { return *(*uint8)(p) == 0 } + + case reflect.Int16, reflect.Uint16: + return func(p unsafe.Pointer) bool { return *(*uint16)(p) == 0 } + + case reflect.Int32, reflect.Uint32: + return func(p unsafe.Pointer) bool { return *(*uint32)(p) == 0 } + + case reflect.Int64, reflect.Uint64: + return func(p unsafe.Pointer) bool { return *(*uint64)(p) == 0 } + + case reflect.Float32: + return func(p unsafe.Pointer) bool { return *(*float32)(p) == 0 } + + case reflect.Float64: + return func(p unsafe.Pointer) bool { return *(*float64)(p) == 0 } + + case reflect.Ptr: + return func(p unsafe.Pointer) bool { return *(*unsafe.Pointer)(p) == nil } + + case reflect.Interface: + return func(p unsafe.Pointer) bool { return (*iface)(p).ptr == nil } + } + + return func(unsafe.Pointer) bool { return false } +} + +type iface struct { + typ unsafe.Pointer + ptr unsafe.Pointer +} + +type slice struct { + data unsafe.Pointer + len int + cap int +} + +type structType struct { + fields []structField + fieldsIndex map[string]*structField + ficaseIndex map[string]*structField + keyset []byte + typ reflect.Type + inlined bool +} + +type structField struct { + codec codec + offset uintptr + empty emptyFunc + tag bool + omitempty bool + json string + html string + name string + typ reflect.Type + zero reflect.Value + index int +} + +func 
unmarshalTypeError(b []byte, t reflect.Type) error { + return &UnmarshalTypeError{Value: strconv.Quote(prefix(b)), Type: t} +} + +func unmarshalOverflow(b []byte, t reflect.Type) error { + return &UnmarshalTypeError{Value: "number " + prefix(b) + " overflows", Type: t} +} + +func unexpectedEOF(b []byte) error { + return syntaxError(b, "unexpected end of JSON input") +} + +var syntaxErrorMsgOffset = ^uintptr(0) + +func init() { + t := reflect.TypeOf(SyntaxError{}) + for i, n := 0, t.NumField(); i < n; i++ { + if f := t.Field(i); f.Type.Kind() == reflect.String { + syntaxErrorMsgOffset = f.Offset + } + } +} + +func syntaxError(b []byte, msg string, args ...interface{}) error { + e := new(SyntaxError) + i := syntaxErrorMsgOffset + if i != ^uintptr(0) { + s := "json: " + fmt.Sprintf(msg, args...) + ": " + prefix(b) + p := unsafe.Pointer(e) + // Hack to set the unexported `msg` field. + *(*string)(unsafe.Pointer(uintptr(p) + i)) = s + } + return e +} + +func objectKeyError(b []byte, err error) ([]byte, error) { + if len(b) == 0 { + return nil, unexpectedEOF(b) + } + switch err.(type) { + case *UnmarshalTypeError: + err = syntaxError(b, "invalid character '%c' looking for beginning of object key", b[0]) + } + return b, err +} + +func prefix(b []byte) string { + if len(b) < 32 { + return string(b) + } + return string(b[:32]) + "..." +} + +func intStringsAreSorted(i0, i1 int64) bool { + var b0, b1 [32]byte + return string(strconv.AppendInt(b0[:0], i0, 10)) < string(strconv.AppendInt(b1[:0], i1, 10)) +} + +func uintStringsAreSorted(u0, u1 uint64) bool { + var b0, b1 [32]byte + return string(strconv.AppendUint(b0[:0], u0, 10)) < string(strconv.AppendUint(b1[:0], u1, 10)) +} + +func stringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&sliceHeader{ + Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)), + Len: len(s), + Cap: len(s), + })) +} + +type sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int +} + +var ( + nullType = reflect.TypeOf(nil) + boolType = reflect.TypeOf(false) + + intType = reflect.TypeOf(int(0)) + int8Type = reflect.TypeOf(int8(0)) + int16Type = reflect.TypeOf(int16(0)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + + uintType = reflect.TypeOf(uint(0)) + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + uintptrType = reflect.TypeOf(uintptr(0)) + + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + + numberType = reflect.TypeOf(json.Number("")) + stringType = reflect.TypeOf("") + stringsType = reflect.TypeOf([]string(nil)) + bytesType = reflect.TypeOf(([]byte)(nil)) + durationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + rawMessageType = reflect.TypeOf(RawMessage(nil)) + + numberPtrType = reflect.PtrTo(numberType) + durationPtrType = reflect.PtrTo(durationType) + timePtrType = reflect.PtrTo(timeType) + rawMessagePtrType = reflect.PtrTo(rawMessageType) + + sliceInterfaceType = reflect.TypeOf(([]interface{})(nil)) + sliceStringType = reflect.TypeOf(([]interface{})(nil)) + mapStringInterfaceType = reflect.TypeOf((map[string]interface{})(nil)) + mapStringRawMessageType = reflect.TypeOf((map[string]RawMessage)(nil)) + mapStringStringType = reflect.TypeOf((map[string]string)(nil)) + mapStringStringSliceType = reflect.TypeOf((map[string][]string)(nil)) + mapStringBoolType = reflect.TypeOf((map[string]bool)(nil)) + + interfaceType = 
reflect.TypeOf((*interface{})(nil)).Elem() + jsonMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + jsonUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// ============================================================================= +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// appendDuration appends a human-readable representation of d to b. +// +// The function copies the implementation of time.Duration.String but prevents +// Go from making a dynamic memory allocation on the returned value. +func appendDuration(b []byte, d time.Duration) []byte { + // Largest time is 2540400h10m10.000000000s + var buf [32]byte + w := len(buf) + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + if u < uint64(time.Second) { + // Special case: if duration is smaller than a second, + // use smaller units, like 1.2ms + var prec int + w-- + buf[w] = 's' + w-- + switch { + case u == 0: + return append(b, '0', 's') + case u < uint64(time.Microsecond): + // print nanoseconds + prec = 0 + buf[w] = 'n' + case u < uint64(time.Millisecond): + // print microseconds + prec = 3 + // U+00B5 'µ' micro sign == 0xC2 0xB5 + w-- // Need room for two bytes. + copy(buf[w:], "µ") + default: + // print milliseconds + prec = 6 + buf[w] = 'm' + } + w, u = fmtFrac(buf[:w], u, prec) + w = fmtInt(buf[:w], u) + } else { + w-- + buf[w] = 's' + + w, u = fmtFrac(buf[:w], u, 9) + + // u is now integer seconds + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer minutes + if u > 0 { + w-- + buf[w] = 'm' + w = fmtInt(buf[:w], u%60) + u /= 60 + + // u is now integer hours + // Stop at hours because days can be different lengths. + if u > 0 { + w-- + buf[w] = 'h' + w = fmtInt(buf[:w], u) + } + } + } + + if neg { + w-- + buf[w] = '-' + } + + return append(b, buf[w:]...) +} + +// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the +// tail of buf, omitting trailing zeros. it omits the decimal +// point too when the fraction is 0. It returns the index where the +// output bytes begin and the value v/10**prec. +func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) { + // Omit trailing zeros up to and including decimal point. + w := len(buf) + print := false + for i := 0; i < prec; i++ { + digit := v % 10 + print = print || digit != 0 + if print { + w-- + buf[w] = byte(digit) + '0' + } + v /= 10 + } + if print { + w-- + buf[w] = '.' + } + return w, v +} + +// fmtInt formats v into the tail of buf. +// It returns the index where the output begins. 
+func fmtInt(buf []byte, v uint64) int { + w := len(buf) + if v == 0 { + w-- + buf[w] = '0' + } else { + for v > 0 { + w-- + buf[w] = byte(v%10) + '0' + v /= 10 + } + } + return w +} + +// ============================================================================= diff --git a/vendor/github.com/segmentio/encoding/json/decode.go b/vendor/github.com/segmentio/encoding/json/decode.go new file mode 100644 index 00000000000..b1723c200c3 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/decode.go @@ -0,0 +1,1462 @@ +package json + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" + "unsafe" + + "github.com/segmentio/asm/base64" + "github.com/segmentio/asm/keyset" + "github.com/segmentio/encoding/iso8601" +) + +func (d decoder) decodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + return d.inputError(b, nullType) +} + +func (d decoder) decodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { + switch { + case hasTruePrefix(b): + *(*bool)(p) = true + return b[4:], nil + + case hasFalsePrefix(b): + *(*bool)(p) = false + return b[5:], nil + + case hasNullPrefix(b): + return b[4:], nil + + default: + return d.inputError(b, boolType) + } +} + +func (d decoder) decodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, intType) + if err != nil { + return r, err + } + + *(*int)(p) = int(v) + return r, nil +} + +func (d decoder) decodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int8Type) + if err != nil { + return r, err + } + + if v < math.MinInt8 || v > math.MaxInt8 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int8Type) + } + + *(*int8)(p) = int8(v) + return r, nil +} + +func (d decoder) decodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int16Type) + if err != nil { + return r, err + } + + if v < math.MinInt16 || v > math.MaxInt16 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int16Type) + } + + *(*int16)(p) = int16(v) + return r, nil +} + +func (d decoder) decodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int32Type) + if err != nil { + return r, err + } + + if v < math.MinInt32 || v > math.MaxInt32 { + return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type) + } + + *(*int32)(p) = int32(v) + return r, nil +} + +func (d decoder) decodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseInt(b, int64Type) + if err != nil { + return r, err + } + + *(*int64)(p) = v + return r, nil +} + +func (d decoder) decodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uintType) + if err != nil { + return r, err + } + + *(*uint)(p) = uint(v) + return r, nil +} + +func (d decoder) decodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uintptrType) + if err != nil { + return r, err + } + + *(*uintptr)(p) = uintptr(v) + return r, nil +} + +func (d decoder) decodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := 
d.parseUint(b, uint8Type) + if err != nil { + return r, err + } + + if v > math.MaxUint8 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint8Type) + } + + *(*uint8)(p) = uint8(v) + return r, nil +} + +func (d decoder) decodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint16Type) + if err != nil { + return r, err + } + + if v > math.MaxUint16 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint16Type) + } + + *(*uint16)(p) = uint16(v) + return r, nil +} + +func (d decoder) decodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint32Type) + if err != nil { + return r, err + } + + if v > math.MaxUint32 { + return r, unmarshalOverflow(b[:len(b)-len(r)], uint32Type) + } + + *(*uint32)(p) = uint32(v) + return r, nil +} + +func (d decoder) decodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, err := d.parseUint(b, uint64Type) + if err != nil { + return r, err + } + + *(*uint64)(p) = v + return r, nil +} + +func (d decoder) decodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, float32Type) + } + + f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 32) + if err != nil { + return d.inputError(b, float32Type) + } + + *(*float32)(p) = float32(f) + return r, nil +} + +func (d decoder) decodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, float64Type) + } + + f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64) + if err != nil { + return d.inputError(b, float64Type) + } + + *(*float64)(p) = f + return r, nil +} + +func (d decoder) decodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + v, r, _, err := d.parseNumber(b) + if err != nil { + return d.inputError(b, numberType) + } + + if (d.flags & DontCopyNumber) != 0 { + *(*Number)(p) = *(*Number)(unsafe.Pointer(&v)) + } else { + *(*Number)(p) = Number(v) + } + + return r, nil +} + +func (d decoder) decodeString(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + s, r, new, err := d.parseStringUnquote(b, nil) + if err != nil { + if len(b) == 0 || b[0] != '"' { + return d.inputError(b, stringType) + } + return r, err + } + + if new || (d.flags&DontCopyString) != 0 { + *(*string)(p) = *(*string)(unsafe.Pointer(&s)) + } else { + *(*string)(p) = string(s) + } + + return r, nil +} + +func (d decoder) decodeFromString(b []byte, p unsafe.Pointer, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + return decode(d, b, p) + } + + v, b, _, err := d.parseStringUnquote(b, nil) + if err != nil { + return d.inputError(v, stringType) + } + + if v, err = decode(d, v, p); err != nil { + return b, err + } + + if v = skipSpaces(v); len(v) != 0 { + return b, syntaxError(v, "unexpected trailing tokens after string value") + } + + return b, nil +} + +func (d decoder) decodeFromStringToInt(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + return decode(d, b, p) + } + + if len(b) > 0 && b[0] != '"' { + v, r, k, err := d.parseNumber(b) + if err == nil { + // The encoding/json package will 
return a *json.UnmarshalTypeError if
+			// the input was a floating point number representation, even though a
+			// string is expected here.
+			if k == Float {
+				_, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64)
+				if err != nil {
+					return r, unmarshalTypeError(v, t)
+				}
+			}
+		}
+		return r, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into int")
+	}
+
+	if len(b) > 1 && b[0] == '"' && b[1] == '"' {
+		return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal \"\" into int")
+	}
+
+	v, b, _, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		return d.inputError(v, t)
+	}
+
+	if hasLeadingZeroes(v) {
+		// In this context the encoding/json package accepts leading zeroes
+		// because it is not constrained by the JSON syntax; remove them so the
+		// parsing functions don't return syntax errors.
+		u := make([]byte, 0, len(v))
+		i := 0
+
+		if i < len(v) && v[i] == '-' || v[i] == '+' {
+			u = append(u, v[i])
+			i++
+		}
+
+		for (i+1) < len(v) && v[i] == '0' && '0' <= v[i+1] && v[i+1] <= '9' {
+			i++
+		}
+
+		v = append(u, v[i:]...)
+	}
+
+	if r, err := decode(d, v, p); err != nil {
+		if _, isSyntaxError := err.(*SyntaxError); isSyntaxError {
+			if hasPrefix(v, "-") {
+				// The standard library interprets sequences of '-' characters
+				// as numbers but still returns type errors in this case...
+				return b, unmarshalTypeError(v, t)
+			}
+			return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v))
+		}
+		// When the input value was a valid number representation we retain the
+		// error returned by the decoder.
+		if _, _, _, err := d.parseNumber(v); err != nil {
+			// When the input value is valid JSON we mirror the behavior of the
+			// encoding/json package and return a generic error.
+			if _, _, _, err := d.parseValue(v); err == nil {
+				return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v))
+			}
+		}
+		return b, err
+	} else if len(r) != 0 {
+		return r, unmarshalTypeError(v, t)
+	}
+
+	return b, nil
+}
+
+func (d decoder) decodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*[]byte)(p) = nil
+		return b[4:], nil
+	}
+
+	if len(b) < 2 {
+		return d.inputError(b, bytesType)
+	}
+
+	if b[0] != '"' {
+		// Go 1.7- behavior: byte slices may be decoded from arrays of integers.
+		if len(b) > 0 && b[0] == '[' {
+			return d.decodeSlice(b, p, 1, bytesType, decoder.decodeUint8)
+		}
+		return d.inputError(b, bytesType)
+	}
+
+	// The input string may contain escaped sequences, so we need to parse it
+	// before decoding it to match the encoding/json package behavior.
+	src, r, _, err := d.parseStringUnquote(b, nil)
+	if err != nil {
+		return d.inputError(b, bytesType)
+	}
+
+	dst := make([]byte, base64.StdEncoding.DecodedLen(len(src)))
+
+	n, err := base64.StdEncoding.Decode(dst, src)
+	if err != nil {
+		return r, err
+	}
+
+	*(*[]byte)(p) = dst[:n]
+	return r, nil
+}
+
+func (d decoder) decodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	// in order to inter-operate with the stdlib, we must be able to interpret
+	// durations passed as integer values.
+	// there's some discussion about being flexible on how durations are
+	// formatted, but for the time being, it's been punted to go2 at the
+	// earliest: https://github.com/golang/go/issues/4712
+	if len(b) > 0 && b[0] != '"' {
+		v, r, err := d.parseInt(b, durationType)
+		if err != nil {
+			return d.inputError(b, int32Type)
+		}
+
+		if v < math.MinInt64 || v > math.MaxInt64 {
+			return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type)
+		}
+
+		*(*time.Duration)(p) = time.Duration(v)
+		return r, nil
+	}
+
+	if len(b) < 2 || b[0] != '"' {
+		return d.inputError(b, durationType)
+	}
+
+	i := bytes.IndexByte(b[1:], '"') + 1
+	if i <= 0 {
+		return d.inputError(b, durationType)
+	}
+
+	s := b[1:i] // trim quotes
+
+	v, err := time.ParseDuration(*(*string)(unsafe.Pointer(&s)))
+	if err != nil {
+		return d.inputError(b, durationType)
+	}
+
+	*(*time.Duration)(p) = v
+	return b[i+1:], nil
+}
+
+func (d decoder) decodeTime(b []byte, p unsafe.Pointer) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '"' {
+		return d.inputError(b, timeType)
+	}
+
+	i := bytes.IndexByte(b[1:], '"') + 1
+	if i <= 0 {
+		return d.inputError(b, timeType)
+	}
+
+	s := b[1:i] // trim quotes
+
+	v, err := iso8601.Parse(*(*string)(unsafe.Pointer(&s)))
+	if err != nil {
+		return d.inputError(b, timeType)
+	}
+
+	*(*time.Time)(p) = v
+	return b[i+1:], nil
+}
+
+func (d decoder) decodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		return b[4:], nil
+	}
+
+	if len(b) < 2 || b[0] != '[' {
+		return d.inputError(b, t)
+	}
+	b = b[1:]
+
+	var err error
+	for i := 0; i < n; i++ {
+		b = skipSpaces(b)
+
+		if i != 0 {
+			if len(b) == 0 {
+				return b, syntaxError(b, "unexpected EOF after array element")
+			}
+			switch b[0] {
+			case ',':
+				b = skipSpaces(b[1:])
+			case ']':
+				return b[1:], nil
+			default:
+				return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
+			}
+		}
+
+		b, err = decode(d, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size)))
+		if err != nil {
+			if e, ok := err.(*UnmarshalTypeError); ok {
+				e.Struct = t.String() + e.Struct
+				e.Field = d.prependField(strconv.Itoa(i), e.Field)
+			}
+			return b, err
+		}
+	}
+
+	// The encoding/json package ignores extra elements found when decoding into
+	// array types (which have a fixed size).
+	for {
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return b, syntaxError(b, "missing closing ']' in array value")
+		}
+
+		switch b[0] {
+		case ',':
+			b = skipSpaces(b[1:])
+		case ']':
+			return b[1:], nil
+		}
+
+		_, b, _, err = d.parseValue(b)
+		if err != nil {
+			return b, err
+		}
+	}
+}
+
+var (
+	// This is a placeholder used to construct non-nil empty slices.
+	empty struct{}
+)
+
+func (d decoder) decodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*slice)(p) = slice{}
+		return b[4:], nil
+	}
+
+	if len(b) < 2 {
+		return d.inputError(b, t)
+	}
+
+	if b[0] != '[' {
+		// Go 1.7- behavior: fall back to decoding as a []byte if the element
+		// type is byte; allow conversions from JSON strings even though the
+		// underlying type implements unmarshaler interfaces.
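+		//
+		// For example, a base64 JSON string such as "aGVsbG8=" is still
+		// accepted here and decoded by decodeBytes even when the element is a
+		// named byte type with its own unmarshaler.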
+ if t.Elem().Kind() == reflect.Uint8 { + return d.decodeBytes(b, p) + } + return d.inputError(b, t) + } + + input := b + b = b[1:] + + s := (*slice)(p) + s.len = 0 + + var err error + for { + b = skipSpaces(b) + + if len(b) != 0 && b[0] == ']' { + if s.data == nil { + s.data = unsafe.Pointer(&empty) + } + return b[1:], nil + } + + if s.len != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected EOF after array element") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if s.len == s.cap { + c := s.cap + + if c == 0 { + c = 10 + } else { + c *= 2 + } + + *s = extendSlice(t, s, c) + } + + b, err = decode(d, b, unsafe.Pointer(uintptr(s.data)+(uintptr(s.len)*size))) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = t.String() + e.Struct + e.Field = d.prependField(strconv.Itoa(s.len), e.Field) + } + return b, err + } + + s.len++ + } +} + +func (d decoder) decodeMap(b []byte, p unsafe.Pointer, t, kt, vt reflect.Type, kz, vz reflect.Value, decodeKey, decodeValue decodeFunc) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, t) + } + i := 0 + m := reflect.NewAt(t, p).Elem() + + k := reflect.New(kt).Elem() + v := reflect.New(vt).Elem() + + kptr := (*iface)(unsafe.Pointer(&k)).ptr + vptr := (*iface)(unsafe.Pointer(&v)).ptr + input := b + + if m.IsNil() { + m = reflect.MakeMap(t) + } + + var err error + b = b[1:] + for { + k.Set(kz) + v.Set(vz) + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = unsafe.Pointer(m.Pointer()) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + if b, err = decodeKey(d, b, kptr); err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + if b, err = decodeValue(d, b, vptr); err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = "map[" + kt.String() + "]" + vt.String() + "{" + e.Struct + "}" + e.Field = d.prependField(fmt.Sprint(k.Interface()), e.Field) + } + return b, err + } + + m.SetMapIndex(k, v) + i++ + } +} + +func (d decoder) decodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringInterfaceType) + } + + i := 0 + m := *(*map[string]interface{})(p) + + if m == nil { + m = make(map[string]interface{}, 64) + } + + var err error + var key string + var val interface{} + var input = b + + b = b[1:] + for { + key = "" + val = nil + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return 
b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeInterface(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringInterfaceType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringRawMessageType) + } + + i := 0 + m := *(*map[string]RawMessage)(p) + + if m == nil { + m = make(map[string]RawMessage, 64) + } + + var err error + var key string + var val RawMessage + var input = b + + b = b[1:] + for { + key = "" + val = nil + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeRawMessage(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringRawMessageType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringType) + } + + i := 0 + m := *(*map[string]string)(p) + + if m == nil { + m = make(map[string]string, 64) + } + + var err error + var key string + var val string + var input = b + + b = b[1:] + for { + key = "" + val = "" + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON 
input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeString(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringStringSliceType) + } + + i := 0 + m := *(*map[string][]string)(p) + + if m == nil { + m = make(map[string][]string, 64) + } + + var err error + var key string + var buf []string + var input = b + var stringSize = unsafe.Sizeof("") + + b = b[1:] + for { + key = "" + buf = buf[:0] + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeSlice(b, unsafe.Pointer(&buf), stringSize, sliceStringType, decoder.decodeString) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + val := make([]string, len(buf)) + copy(val, buf) + + m[key] = val + i++ + } +} + +func (d decoder) decodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if hasNullPrefix(b) { + *(*unsafe.Pointer)(p) = nil + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, mapStringBoolType) + } + + i := 0 + m := *(*map[string]bool)(p) + + if m == nil { + m = make(map[string]bool, 64) + } + + var err error + var key string + var val bool + var input = b + + b = b[1:] + for { + key = "" + val = false + + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, 
"unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + b, err = d.decodeString(b, unsafe.Pointer(&key)) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + b, err = d.decodeBool(b, unsafe.Pointer(&val)) + if err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = mapStringStringType.String() + e.Struct + e.Field = d.prependField(key, e.Field) + } + return b, err + } + + m[key] = val + i++ + } +} + +func (d decoder) decodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + if hasNullPrefix(b) { + return b[4:], nil + } + + if len(b) < 2 || b[0] != '{' { + return d.inputError(b, st.typ) + } + + var err error + var k []byte + var i int + + // memory buffer used to convert short field names to lowercase + var buf [64]byte + var key []byte + var input = b + + b = b[1:] + for { + b = skipSpaces(b) + + if len(b) != 0 && b[0] == '}' { + return b[1:], nil + } + + if i != 0 { + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field value") + } + if b[0] != ',' { + return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + } + i++ + + if hasNullPrefix(b) { + return b, syntaxError(b, "cannot decode object key string from 'null' value") + } + + k, b, _, err = d.parseStringUnquote(b, nil) + if err != nil { + return objectKeyError(b, err) + } + b = skipSpaces(b) + + if len(b) == 0 { + return b, syntaxError(b, "unexpected end of JSON input after object field key") + } + if b[0] != ':' { + return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) + } + b = skipSpaces(b[1:]) + + var f *structField + if len(st.keyset) != 0 { + if n := keyset.Lookup(st.keyset, k); n < len(st.fields) { + f = &st.fields[n] + } + } else { + f = st.fieldsIndex[string(k)] + } + + if f == nil && (d.flags&DontMatchCaseInsensitiveStructFields) == 0 { + key = appendToLower(buf[:0], k) + f = st.ficaseIndex[string(key)] + } + + if f == nil { + if (d.flags & DisallowUnknownFields) != 0 { + return b, fmt.Errorf("json: unknown field %q", k) + } + if _, b, _, err = d.parseValue(b); err != nil { + return b, err + } + continue + } + + if b, err = f.codec.decode(d, b, unsafe.Pointer(uintptr(p)+f.offset)); err != nil { + if _, r, _, err := d.parseValue(input); err != nil { + return r, err + } else { + b = r + } + if e, ok := err.(*UnmarshalTypeError); ok { + e.Struct = st.typ.String() + e.Struct + e.Field = d.prependField(string(k), e.Field) + } + return b, err + } + } +} + +func (d decoder) decodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) ([]byte, error) { + v := *(*unsafe.Pointer)(p) + + if v == nil { + if unexported { + return nil, fmt.Errorf("json: cannot set embedded pointer to unexported struct: %s", t) + } + v = unsafe.Pointer(reflect.New(t).Pointer()) + *(*unsafe.Pointer)(p) = v + } + + return 
decode(d, b, unsafe.Pointer(uintptr(v)+offset))
+}
+
+func (d decoder) decodePointer(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) {
+	if hasNullPrefix(b) {
+		pp := *(*unsafe.Pointer)(p)
+		if pp != nil && t.Kind() == reflect.Ptr {
+			return decode(d, b, pp)
+		}
+		*(*unsafe.Pointer)(p) = nil
+		return b[4:], nil
+	}
+
+	v := *(*unsafe.Pointer)(p)
+	if v == nil {
+		v = unsafe.Pointer(reflect.New(t).Pointer())
+		*(*unsafe.Pointer)(p) = v
+	}
+
+	return decode(d, b, v)
+}
+
+func (d decoder) decodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) {
+	val := *(*interface{})(p)
+	*(*interface{})(p) = nil
+
+	if t := reflect.TypeOf(val); t != nil && t.Kind() == reflect.Ptr {
+		if v := reflect.ValueOf(val); v.IsNil() || t.Elem().Kind() != reflect.Ptr {
+			// If the destination is nil the only value that is OK to decode is
+			// `null`, and the encoding/json package always nils the destination
+			// interface value in this case.
+			if hasNullPrefix(b) {
+				*(*interface{})(p) = nil
+				return b[4:], nil
+			}
+		}
+
+		b, err := Parse(b, val, d.flags)
+		if err == nil {
+			*(*interface{})(p) = val
+		}
+		return b, err
+	}
+
+	v, b, k, err := d.parseValue(b)
+	if err != nil {
+		return b, err
+	}
+
+	switch k.Class() {
+	case Object:
+		m := make(map[string]interface{})
+		v, err = d.decodeMapStringInterface(v, unsafe.Pointer(&m))
+		val = m
+
+	case Array:
+		a := make([]interface{}, 0, 10)
+		v, err = d.decodeSlice(v, unsafe.Pointer(&a), unsafe.Sizeof(a[0]), sliceInterfaceType, decoder.decodeInterface)
+		val = a
+
+	case String:
+		s := ""
+		v, err = d.decodeString(v, unsafe.Pointer(&s))
+		val = s
+
+	case Null:
+		v, val = nil, nil
+
+	case Bool:
+		v, val = nil, k == True
+
+	case Num:
+		if (d.flags & UseNumber) != 0 {
+			n := Number("")
+			v, err = d.decodeNumber(v, unsafe.Pointer(&n))
+			val = n
+		} else {
+			f := 0.0
+			v, err = d.decodeFloat64(v, unsafe.Pointer(&f))
+			val = f
+		}
+
+	default:
+		return b, syntaxError(v, "expected token but found '%c'", v[0])
+	}
+
+	if err != nil {
+		return b, err
+	}
+
+	if v = skipSpaces(v); len(v) != 0 {
+		return b, syntaxError(v, "unexpected trailing tokens after json value")
+	}
+
+	*(*interface{})(p) = val
+	return b, nil
+}
+
+func (d decoder) decodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
+	if hasNullPrefix(b) {
+		*(*interface{})(p) = nil
+		return b[4:], nil
+	}
+
+	if x := reflect.NewAt(t, p).Elem(); !x.IsNil() {
+		if e := x.Elem(); e.Kind() == reflect.Ptr {
+			return Parse(b, e.Interface(), d.flags)
+		}
+	} else if t.NumMethod() == 0 { // empty interface
+		return Parse(b, (*interface{})(p), d.flags)
+	}
+
+	return d.decodeUnmarshalTypeError(b, p, t)
+}
+
+func (d decoder) decodeUnmarshalTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) {
+	v, b, _, err := d.parseValue(b)
+	if err != nil {
+		return b, err
+	}
+	return b, &UnmarshalTypeError{
+		Value: string(v),
+		Type:  t,
+	}
+}
+
+func (d decoder) decodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) {
+	v, r, _, err := d.parseValue(b)
+	if err != nil {
+		return d.inputError(b, rawMessageType)
+	}
+
+	if (d.flags & DontCopyRawMessage) == 0 {
+		// Copy the value so the returned RawMessage does not alias the
+		// caller's input buffer.
+		v = append(make([]byte, 0, len(v)), v...)
+ } + + *(*RawMessage)(p) = json.RawMessage(v) + return r, err +} + +func (d decoder) decodeJSONUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v, b, _, err := d.parseValue(b) + if err != nil { + return b, err + } + + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + + return b, u.Interface().(Unmarshaler).UnmarshalJSON(v) +} + +func (d decoder) decodeTextUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + var value string + + v, b, k, err := d.parseValue(b) + if err != nil { + return b, err + } + if len(v) == 0 { + return d.inputError(v, t) + } + + switch k.Class() { + case Null: + return b, err + + case String: + s, _, _, err := d.parseStringUnquote(v, nil) + if err != nil { + return b, err + } + u := reflect.NewAt(t, p) + if !pointer { + u = u.Elem() + t = t.Elem() + } + if u.IsNil() { + u.Set(reflect.New(t)) + } + return b, u.Interface().(encoding.TextUnmarshaler).UnmarshalText(s) + + case Bool: + if k == True { + value = "true" + } else { + value = "false" + } + + case Num: + value = "number" + + case Object: + value = "object" + + case Array: + value = "array" + } + + return b, &UnmarshalTypeError{Value: value, Type: reflect.PtrTo(t)} +} + +func (d decoder) prependField(key, field string) string { + if field != "" { + return key + "." + field + } + return key +} + +func (d decoder) inputError(b []byte, t reflect.Type) ([]byte, error) { + if len(b) == 0 { + return nil, unexpectedEOF(b) + } + _, r, _, err := d.parseValue(b) + if err != nil { + return r, err + } + return skipSpaces(r), unmarshalTypeError(b, t) +} diff --git a/vendor/github.com/segmentio/encoding/json/encode.go b/vendor/github.com/segmentio/encoding/json/encode.go new file mode 100644 index 00000000000..acb3b67b5a1 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/encode.go @@ -0,0 +1,990 @@ +package json + +import ( + "encoding" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "sync" + "time" + "unicode/utf8" + "unsafe" + + "github.com/segmentio/asm/base64" +) + +const hex = "0123456789abcdef" + +func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { + return append(b, "null"...), nil +} + +func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { + if *(*bool)(p) { + return append(b, "true"...), nil + } + return append(b, "false"...), nil +} + +func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int)(p))), nil +} + +func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int8)(p))), nil +} + +func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int16)(p))), nil +} + +func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, int64(*(*int32)(p))), nil +} + +func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendInt(b, *(*int64)(p)), nil +} + +func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint)(p))), nil +} + +func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uintptr)(p))), nil +} + +func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint8)(p))), nil +} + +func (e encoder) encodeUint16(b 
[]byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint16)(p))), nil +} + +func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, uint64(*(*uint32)(p))), nil +} + +func (e encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { + return appendUint(b, *(*uint64)(p)), nil +} + +func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, float64(*(*float32)(p)), 32) +} + +func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { + return e.encodeFloat(b, *(*float64)(p), 64) +} + +func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) { + switch { + case math.IsNaN(f): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"} + case math.IsInf(f, 0): + return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"} + } + + // Convert as if by ES6 number to string conversion. + // This matches most other JSON generators. + // See golang.org/issue/6384 and golang.org/issue/14135. + // Like fmt %g, but the exponent cutoffs are different + // and exponents themselves are not padded to two digits. + abs := math.Abs(f) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + + b = strconv.AppendFloat(b, f, fmt, -1, int(bits)) + + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { + b[n-2] = b[n-1] + b = b[:n-1] + } + } + + return b, nil +} + +func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { + n := *(*Number)(p) + if n == "" { + n = "0" + } + + d := decoder{} + _, _, _, err := d.parseNumber(stringToBytes(string(n))) + if err != nil { + return b, err + } + + return append(b, n...), nil +} + +func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) { + s := *(*string)(p) + if len(s) == 0 { + return append(b, `""`...), nil + } + i := 0 + j := 0 + escapeHTML := (e.flags & EscapeHTML) != 0 + + b = append(b, '"') + + if len(s) >= 8 { + if j = escapeIndex(s, escapeHTML); j < 0 { + return append(append(b, s...), '"'), nil + } + } + + for j < len(s) { + c := s[j] + + if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) { + // fast path: most of the time, printable ascii characters are used + j++ + continue + } + + switch c { + case '\\', '"': + b = append(b, s[i:j]...) + b = append(b, '\\', c) + i = j + 1 + j = j + 1 + continue + + case '\n': + b = append(b, s[i:j]...) + b = append(b, '\\', 'n') + i = j + 1 + j = j + 1 + continue + + case '\r': + b = append(b, s[i:j]...) + b = append(b, '\\', 'r') + i = j + 1 + j = j + 1 + continue + + case '\t': + b = append(b, s[i:j]...) + b = append(b, '\\', 't') + i = j + 1 + j = j + 1 + continue + + case '<', '>', '&': + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + // This encodes bytes < 0x20 except for \t, \n and \r. + if c < 0x20 { + b = append(b, s[i:j]...) + b = append(b, `\u00`...) + b = append(b, hex[c>>4], hex[c&0xF]) + i = j + 1 + j = j + 1 + continue + } + + r, size := utf8.DecodeRuneInString(s[j:]) + + if r == utf8.RuneError && size == 1 { + b = append(b, s[i:j]...) + b = append(b, `\ufffd`...) 
+ i = j + size + j = j + size + continue + } + + switch r { + case '\u2028', '\u2029': + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + b = append(b, s[i:j]...) + b = append(b, `\u202`...) + b = append(b, hex[r&0xF]) + i = j + size + j = j + size + continue + } + + j += size + } + + b = append(b, s[i:]...) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) { + i := len(b) + + b, err := encode(e, b, p) + if err != nil { + return b, err + } + + j := len(b) + s := b[i:] + + if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil { + return b, err + } + + n := copy(b[i:], b[j:]) + return b[:i+n], nil +} + +func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*[]byte)(p) + if v == nil { + return append(b, "null"...), nil + } + + n := base64.StdEncoding.EncodedLen(len(v)) + 2 + + if avail := cap(b) - len(b); avail < n { + newB := make([]byte, cap(b)+(n-avail)) + copy(newB, b) + b = newB[:len(b)] + } + + i := len(b) + j := len(b) + n + + b = b[:j] + b[i] = '"' + base64.StdEncoding.Encode(b[i+1:j-1], v) + b[j-1] = '"' + return b, nil +} + +func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { + b = append(b, '"') + b = appendDuration(b, *(*time.Duration)(p)) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { + t := *(*time.Time)(p) + b = append(b, '"') + b = t.AppendFormat(b, time.RFC3339Nano) + b = append(b, '"') + return b, nil +} + +func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { + var start = len(b) + var err error + b = append(b, '[') + + for i := 0; i < n; i++ { + if i != 0 { + b = append(b, ',') + } + if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil { + return b[:start], err + } + } + + b = append(b, ']') + return b, nil +} + +func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { + s := (*slice)(p) + + if s.data == nil && s.len == 0 && s.cap == 0 { + return append(b, "null"...), nil + } + + return e.encodeArray(b, s.data, s.len, size, t, encode) +} + +func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) { + m := reflect.NewAt(t, p).Elem() + if m.IsNil() { + return append(b, "null"...), nil + } + + keys := m.MapKeys() + if sortKeys != nil && (e.flags&SortMapKeys) != 0 { + sortKeys(keys) + } + + var start = len(b) + var err error + b = append(b, '{') + + for i, k := range keys { + v := m.MapIndex(k) + + if i != 0 { + b = append(b, ',') + } + + if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil { + return b[:start], err + } + + b = append(b, ':') + + if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil { + return b[:start], err + } + } + + b = append(b, '}') + return b, nil +} + +type element struct { + key string + val interface{} + raw RawMessage +} + +type mapslice struct { + elements []element +} + +func (m *mapslice) Len() 
int { return len(m.elements) } +func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key } +func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] } + +var mapslicePool = sync.Pool{ + New: func() interface{} { return new(mapslice) }, +} + +func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]interface{})(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = Append(b, v, e.flags) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = Append(b, elem.val, e.flags) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]RawMessage)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString doesn't return errors so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&v)) + if err != nil { + break + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, raw := range m { + s.elements = append(s.elements, element{key: key, raw: raw}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw)) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringString(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. 
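+	// (The same unsorted/sorted split appears in the other specialized map
+	// encoders in this file; it avoids the mapslice allocation and the sort
+	// whenever SortMapKeys is not requested.)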
+ b = append(b, '{') + + if len(m) != 0 { + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(&v)) + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + b, _ = e.encodeString(b, unsafe.Pointer(elem.val.(*string))) + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringStringSlice(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string][]string)(p) + if m == nil { + return append(b, "null"...), nil + } + + var stringSize = unsafe.Sizeof("") + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var err error + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(&v), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + return b, err + } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + v := val + s.elements = append(s.elements, element{key: key, val: &v}) + } + sort.Sort(s) + + var start = len(b) + var err error + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + b = append(b, ':') + + b, err = e.encodeSlice(b, unsafe.Pointer(elem.val.(*[]string)), stringSize, sliceStringType, encoder.encodeString) + if err != nil { + break + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + if err != nil { + return b[:start], err + } + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeMapStringBool(b []byte, p unsafe.Pointer) ([]byte, error) { + m := *(*map[string]bool)(p) + if m == nil { + return append(b, "null"...), nil + } + + if (e.flags & SortMapKeys) == 0 { + // Optimized code path when the program does not need the map keys to be + // sorted. + b = append(b, '{') + + if len(m) != 0 { + var i = 0 + + for k, v := range m { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&k)) + if v { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) 
+ } + + i++ + } + } + + b = append(b, '}') + return b, nil + } + + s := mapslicePool.Get().(*mapslice) + if cap(s.elements) < len(m) { + s.elements = make([]element, 0, align(10, uintptr(len(m)))) + } + for key, val := range m { + s.elements = append(s.elements, element{key: key, val: val}) + } + sort.Sort(s) + + b = append(b, '{') + + for i, elem := range s.elements { + if i != 0 { + b = append(b, ',') + } + + // encodeString never returns an error so we ignore it here + b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) + if elem.val.(bool) { + b = append(b, ":true"...) + } else { + b = append(b, ":false"...) + } + } + + for i := range s.elements { + s.elements[i] = element{} + } + + s.elements = s.elements[:0] + mapslicePool.Put(s) + + b = append(b, '}') + return b, nil +} + +func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { + var start = len(b) + var err error + var k string + var n int + b = append(b, '{') + + escapeHTML := (e.flags & EscapeHTML) != 0 + + for i := range st.fields { + f := &st.fields[i] + v := unsafe.Pointer(uintptr(p) + f.offset) + + if f.omitempty && f.empty(v) { + continue + } + + if escapeHTML { + k = f.html + } else { + k = f.json + } + + lengthBeforeKey := len(b) + + if n != 0 { + b = append(b, k...) + } else { + b = append(b, k[1:]...) + } + + if b, err = f.codec.encode(e, b, v); err != nil { + if err == (rollback{}) { + b = b[:lengthBeforeKey] + continue + } + return b[:start], err + } + + n++ + } + + b = append(b, '}') + return b, nil +} + +type rollback struct{} + +func (rollback) Error() string { return "rollback" } + +func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) { + p = *(*unsafe.Pointer)(p) + if p == nil { + return b, rollback{} + } + return encode(e, b, unsafe.Pointer(uintptr(p)+offset)) +} + +func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) { + if p = *(*unsafe.Pointer)(p); p != nil { + if e.ptrDepth++; e.ptrDepth >= startDetectingCyclesAfter { + if _, seen := e.ptrSeen[p]; seen { + // TODO: reconstruct the reflect.Value from p + t so we can set + // the erorr's Value field? 
+ return b, &UnsupportedValueError{Str: fmt.Sprintf("encountered a cycle via %s", t)} + } + if e.ptrSeen == nil { + e.ptrSeen = make(map[unsafe.Pointer]struct{}) + } + e.ptrSeen[p] = struct{}{} + defer delete(e.ptrSeen, p) + } + return encode(e, b, p) + } + return e.encodeNull(b, nil) +} + +func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { + return Append(b, *(*interface{})(p), e.flags) +} + +func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags) +} + +func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { + return b, &UnsupportedTypeError{Type: t} +} + +func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { + v := *(*RawMessage)(p) + + if v == nil { + return append(b, "null"...), nil + } + + var s []byte + + if (e.flags & TrustRawMessage) != 0 { + s = v + } else { + var err error + d := decoder{} + s, _, _, err = d.parseValue(v) + if err != nil { + return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()} + } + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, "null"...), nil + } + } + + j, err := v.Interface().(Marshaler).MarshalJSON() + if err != nil { + return b, err + } + + d := decoder{} + s, _, _, err := d.parseValue(j) + if err != nil { + return b, &MarshalerError{Type: t, Err: err} + } + + if (e.flags & EscapeHTML) != 0 { + return appendCompactEscapeHTML(b, s), nil + } + + return append(b, s...), nil +} + +func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { + v := reflect.NewAt(t, p) + + if !pointer { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return append(b, `null`...), nil + } + } + + s, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return b, err + } + + return e.encodeString(b, unsafe.Pointer(&s)) +} + +func appendCompactEscapeHTML(dst []byte, src []byte) []byte { + start := 0 + escape := false + inString := false + + for i, c := range src { + if !inString { + switch c { + case '"': // enter string + inString = true + case ' ', '\n', '\r', '\t': // skip space + if start < i { + dst = append(dst, src[start:i]...) + } + start = i + 1 + } + continue + } + + if escape { + escape = false + continue + } + + if c == '\\' { + escape = true + continue + } + + if c == '"' { + inString = false + continue + } + + if c == '<' || c == '>' || c == '&' { + if start < i { + dst = append(dst, src[start:i]...) + } + dst = append(dst, `\u00`...) + dst = append(dst, hex[c>>4], hex[c&0xF]) + start = i + 1 + continue + } + + // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). + if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { + if start < i { + dst = append(dst, src[start:i]...) + } + dst = append(dst, `\u202`...) + dst = append(dst, hex[src[i+2]&0xF]) + start = i + 3 + continue + } + } + + if start < len(src) { + dst = append(dst, src[start:]...) 
+ } + + return dst +} diff --git a/vendor/github.com/segmentio/encoding/json/int.go b/vendor/github.com/segmentio/encoding/json/int.go new file mode 100644 index 00000000000..b53149cbd7a --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/int.go @@ -0,0 +1,98 @@ +package json + +import ( + "unsafe" +) + +var endianness int + +func init() { + var b [2]byte + *(*uint16)(unsafe.Pointer(&b)) = uint16(0xABCD) + + switch b[0] { + case 0xCD: + endianness = 0 // LE + case 0xAB: + endianness = 1 // BE + default: + panic("could not determine endianness") + } +} + +// "00010203...96979899" cast to []uint16 +var intLELookup = [100]uint16{ + 0x3030, 0x3130, 0x3230, 0x3330, 0x3430, 0x3530, 0x3630, 0x3730, 0x3830, 0x3930, + 0x3031, 0x3131, 0x3231, 0x3331, 0x3431, 0x3531, 0x3631, 0x3731, 0x3831, 0x3931, + 0x3032, 0x3132, 0x3232, 0x3332, 0x3432, 0x3532, 0x3632, 0x3732, 0x3832, 0x3932, + 0x3033, 0x3133, 0x3233, 0x3333, 0x3433, 0x3533, 0x3633, 0x3733, 0x3833, 0x3933, + 0x3034, 0x3134, 0x3234, 0x3334, 0x3434, 0x3534, 0x3634, 0x3734, 0x3834, 0x3934, + 0x3035, 0x3135, 0x3235, 0x3335, 0x3435, 0x3535, 0x3635, 0x3735, 0x3835, 0x3935, + 0x3036, 0x3136, 0x3236, 0x3336, 0x3436, 0x3536, 0x3636, 0x3736, 0x3836, 0x3936, + 0x3037, 0x3137, 0x3237, 0x3337, 0x3437, 0x3537, 0x3637, 0x3737, 0x3837, 0x3937, + 0x3038, 0x3138, 0x3238, 0x3338, 0x3438, 0x3538, 0x3638, 0x3738, 0x3838, 0x3938, + 0x3039, 0x3139, 0x3239, 0x3339, 0x3439, 0x3539, 0x3639, 0x3739, 0x3839, 0x3939, +} + +var intBELookup = [100]uint16{ + 0x3030, 0x3031, 0x3032, 0x3033, 0x3034, 0x3035, 0x3036, 0x3037, 0x3038, 0x3039, + 0x3130, 0x3131, 0x3132, 0x3133, 0x3134, 0x3135, 0x3136, 0x3137, 0x3138, 0x3139, + 0x3230, 0x3231, 0x3232, 0x3233, 0x3234, 0x3235, 0x3236, 0x3237, 0x3238, 0x3239, + 0x3330, 0x3331, 0x3332, 0x3333, 0x3334, 0x3335, 0x3336, 0x3337, 0x3338, 0x3339, + 0x3430, 0x3431, 0x3432, 0x3433, 0x3434, 0x3435, 0x3436, 0x3437, 0x3438, 0x3439, + 0x3530, 0x3531, 0x3532, 0x3533, 0x3534, 0x3535, 0x3536, 0x3537, 0x3538, 0x3539, + 0x3630, 0x3631, 0x3632, 0x3633, 0x3634, 0x3635, 0x3636, 0x3637, 0x3638, 0x3639, + 0x3730, 0x3731, 0x3732, 0x3733, 0x3734, 0x3735, 0x3736, 0x3737, 0x3738, 0x3739, + 0x3830, 0x3831, 0x3832, 0x3833, 0x3834, 0x3835, 0x3836, 0x3837, 0x3838, 0x3839, + 0x3930, 0x3931, 0x3932, 0x3933, 0x3934, 0x3935, 0x3936, 0x3937, 0x3938, 0x3939, +} + +var intLookup = [2]*[100]uint16{&intLELookup, &intBELookup} + +func appendInt(b []byte, n int64) []byte { + return formatInteger(b, uint64(n), n < 0) +} + +func appendUint(b []byte, n uint64) []byte { + return formatInteger(b, n, false) +} + +func formatInteger(out []byte, n uint64, negative bool) []byte { + if !negative { + if n < 10 { + return append(out, byte(n+'0')) + } else if n < 100 { + u := intLELookup[n] + return append(out, byte(u), byte(u>>8)) + } + } else { + n = -n + } + + lookup := intLookup[endianness] + + var b [22]byte + u := (*[11]uint16)(unsafe.Pointer(&b)) + i := 11 + + for n >= 100 { + j := n % 100 + n /= 100 + i-- + u[i] = lookup[j] + } + + i-- + u[i] = lookup[n] + + i *= 2 // convert to byte index + if n < 10 { + i++ // remove leading zero + } + if negative { + i-- + b[i] = '-' + } + + return append(out, b[i:]...) 
+}
diff --git a/vendor/github.com/segmentio/encoding/json/json.go b/vendor/github.com/segmentio/encoding/json/json.go
new file mode 100644
index 00000000000..47f3ba1732a
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/json.go
@@ -0,0 +1,582 @@
+package json
+
+import (
+	"bytes"
+	"encoding/json"
+	"io"
+	"math/bits"
+	"reflect"
+	"runtime"
+	"sync"
+	"unsafe"
+)
+
+// Delim is documented at https://golang.org/pkg/encoding/json/#Delim
+type Delim = json.Delim
+
+// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error
+type InvalidUTF8Error = json.InvalidUTF8Error
+
+// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError
+type InvalidUnmarshalError = json.InvalidUnmarshalError
+
+// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler
+type Marshaler = json.Marshaler
+
+// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError
+type MarshalerError = json.MarshalerError
+
+// Number is documented at https://golang.org/pkg/encoding/json/#Number
+type Number = json.Number
+
+// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage
+type RawMessage = json.RawMessage
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError = json.SyntaxError
+
+// Token is documented at https://golang.org/pkg/encoding/json/#Token
+type Token = json.Token
+
+// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError
+type UnmarshalFieldError = json.UnmarshalFieldError
+
+// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError
+type UnmarshalTypeError = json.UnmarshalTypeError
+
+// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler
+type Unmarshaler = json.Unmarshaler
+
+// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError
+type UnsupportedTypeError = json.UnsupportedTypeError
+
+// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError
+type UnsupportedValueError = json.UnsupportedValueError
+
+// AppendFlags is a type used to represent configuration options that can be
+// applied when formatting json output.
+type AppendFlags uint32
+
+const (
+	// EscapeHTML is a formatting flag used to escape HTML in json strings.
+	EscapeHTML AppendFlags = 1 << iota
+
+	// SortMapKeys is a formatting flag used to enable sorting of map keys when
+	// encoding JSON (this matches the behavior of the standard encoding/json
+	// package).
+	SortMapKeys
+
+	// TrustRawMessage is a performance optimization flag to skip value
+	// checking of raw messages. It should only be used if the values are
+	// known to be valid json (e.g., they were created by json.Unmarshal).
+	TrustRawMessage
+
+	// appendNewline is a formatting flag to enable the addition of a newline
+	// in Encode (this matches the behavior of the standard encoding/json
+	// package).
+	appendNewline
+)
+
+// ParseFlags is a type used to represent configuration options that can be
+// applied when parsing json input.
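+//
+// Parse flags are combined with a bitwise OR, for example (illustrative,
+// using flags defined below):
+//
+//	flags := UseNumber | DontCopyString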
+type ParseFlags uint32
+
+func (flags ParseFlags) has(f ParseFlags) bool {
+	return (flags & f) != 0
+}
+
+func (f ParseFlags) kind() Kind {
+	return Kind((f >> kindOffset) & 0xFF)
+}
+
+func (f ParseFlags) withKind(kind Kind) ParseFlags {
+	return (f & ^(ParseFlags(0xFF) << kindOffset)) | (ParseFlags(kind) << kindOffset)
+}
+
+const (
+	// DisallowUnknownFields is a parsing flag used to prevent decoding of
+	// objects to Go struct values when a field of the input does not match
+	// with any of the struct fields.
+	DisallowUnknownFields ParseFlags = 1 << iota
+
+	// UseNumber is a parsing flag used to load numeric values as Number
+	// instead of float64.
+	UseNumber
+
+	// DontCopyString is a parsing flag used to provide zero-copy support when
+	// loading string values from a json payload. It is not always possible to
+	// avoid dynamic memory allocations, for example when a string is escaped in
+	// the json data a new buffer has to be allocated, but when the `wire` value
+	// can be used as content of a Go value the decoder will simply point into
+	// the input buffer.
+	DontCopyString
+
+	// DontCopyNumber is a parsing flag used to provide zero-copy support when
+	// loading Number values (see DontCopyString and DontCopyRawMessage).
+	DontCopyNumber
+
+	// DontCopyRawMessage is a parsing flag used to provide zero-copy support
+	// when loading RawMessage values from a json payload. When used, the
+	// RawMessage values will not be allocated into new memory buffers and
+	// will instead point directly to the area of the input buffer where the
+	// value was found.
+	DontCopyRawMessage
+
+	// DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent
+	// matching fields in a case-insensitive way. This can prevent degrading
+	// performance on case conversions, and can also act as a stricter decoding
+	// mode.
+	DontMatchCaseInsensitiveStructFields
+
+	// ZeroCopy is a parsing flag that combines all the copy optimizations
+	// available in the package.
+	//
+	// The zero-copy optimizations are better used in request-handler style
+	// code where none of the values are retained after the handler returns.
+	ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage
+
+	// validAsciiPrint is an internal flag indicating that the input contains
+	// only valid ASCII print chars (0x20 <= c <= 0x7E). If the flag is unset,
+	// it's unknown whether the input is valid ASCII print.
+	validAsciiPrint ParseFlags = 1 << 28
+
+	// noBackslash is an internal flag indicating that the input does not
+	// contain a backslash. If the flag is unset, it's unknown whether the
+	// input contains a backslash.
+	noBackslash ParseFlags = 1 << 29
+
+	// Bit offset where the kind of the json value is stored.
+	//
+	// See Kind in token.go for the enum.
+	kindOffset ParseFlags = 16
+)
+
+// Kind represents the different kinds of value that exist in JSON.
+type Kind uint
+
+const (
+	Undefined Kind = 0
+
+	Null Kind = 1 // Null is not zero, so we keep zero for "undefined".
+
+	Bool  Kind = 2 // Bit two is set to 1, means it's a boolean.
+	False Kind = 2 // Bool + 0
+	True  Kind = 3 // Bool + 1
+
+	Num   Kind = 4 // Bit three is set to 1, means it's a number.
+	Uint  Kind = 5 // Num + 1
+	Int   Kind = 6 // Num + 2
+	Float Kind = 7 // Num + 3
+
+	String    Kind = 8 // Bit four is set to 1, means it's a string.
+	Unescaped Kind = 9 // String + 1
+
+	Array  Kind = 16 // Equivalent to Delim == '['
+	Object Kind = 32 // Equivalent to Delim == '{'
+)
+
+// Class returns the class of k.
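+// For example, True.Class() == Bool, Unescaped.Class() == String, and
+// Uint.Class() == Num: the class is the kind's highest set bit.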
+func (k Kind) Class() Kind { return Kind(1 << uint(bits.Len(uint(k))-1)) }
+
+// Append acts like Marshal but appends the json representation to b instead of
+// always reallocating a new slice.
+func Append(b []byte, x interface{}, flags AppendFlags) ([]byte, error) {
+	if x == nil {
+		// Special case for nil values because it makes the rest of the code
+		// simpler to assume that it won't be seeing nil pointers.
+		return append(b, "null"...), nil
+	}
+
+	t := reflect.TypeOf(x)
+	p := (*iface)(unsafe.Pointer(&x)).ptr
+
+	cache := cacheLoad()
+	c, found := cache[typeid(t)]
+
+	if !found {
+		c = constructCachedCodec(t, cache)
+	}
+
+	b, err := c.encode(encoder{flags: flags}, b, p)
+	runtime.KeepAlive(x)
+	return b, err
+}
+
+// Escape is a convenience helper to construct an escaped JSON string from s.
+// The function escapes HTML characters; for more control over the escape
+// behavior and to write to a pre-allocated buffer, use AppendEscape.
+func Escape(s string) []byte {
+	// +10 for extra escape characters, maybe not enough and the buffer will
+	// be reallocated.
+	b := make([]byte, 0, len(s)+10)
+	return AppendEscape(b, s, EscapeHTML)
+}
+
+// AppendEscape appends s to b with the string escaped as a JSON value.
+// This will include the starting and ending quote characters, and the
+// appropriate characters will be escaped correctly for JSON encoding.
+func AppendEscape(b []byte, s string, flags AppendFlags) []byte {
+	e := encoder{flags: flags}
+	b, _ = e.encodeString(b, unsafe.Pointer(&s))
+	return b
+}
+
+// Unescape is a convenience helper to unescape a JSON value.
+// For more control over the unescape behavior and
+// to write to a pre-allocated buffer, use AppendUnescape.
+func Unescape(s []byte) []byte {
+	b := make([]byte, 0, len(s))
+	return AppendUnescape(b, s, ParseFlags(0))
+}
+
+// AppendUnescape appends s to b with the string unescaped as a JSON value.
+// This will remove starting and ending quote characters, and the
+// appropriate characters will be unescaped correctly as if JSON-decoded.
+// New space will be reallocated if more space is needed.
+func AppendUnescape(b []byte, s []byte, flags ParseFlags) []byte {
+	d := decoder{flags: flags}
+	buf := new(string)
+	d.decodeString(s, unsafe.Pointer(buf))
+	return append(b, *buf...)
+} + +// Compact is documented at https://golang.org/pkg/encoding/json/#Compact +func Compact(dst *bytes.Buffer, src []byte) error { + return json.Compact(dst, src) +} + +// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape +func HTMLEscape(dst *bytes.Buffer, src []byte) { + json.HTMLEscape(dst, src) +} + +// Indent is documented at https://golang.org/pkg/encoding/json/#Indent +func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { + return json.Indent(dst, src, prefix, indent) +} + +// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal +func Marshal(x interface{}) ([]byte, error) { + var err error + var buf = encoderBufferPool.Get().(*encoderBuffer) + + if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil { + return nil, err + } + + b := make([]byte, len(buf.data)) + copy(b, buf.data) + encoderBufferPool.Put(buf) + return b, nil +} + +// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent +func MarshalIndent(x interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(x) + + if err == nil { + tmp := &bytes.Buffer{} + tmp.Grow(2 * len(b)) + + Indent(tmp, b, prefix, indent) + b = tmp.Bytes() + } + + return b, err +} + +// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal +func Unmarshal(b []byte, x interface{}) error { + r, err := Parse(b, x, 0) + if len(r) != 0 { + if _, ok := err.(*SyntaxError); !ok { + // The encoding/json package prioritizes reporting errors caused by + // unexpected trailing bytes over other issues; here we emulate this + // behavior by overriding the error. + err = syntaxError(r, "invalid character '%c' after top-level value", r[0]) + } + } + return err +} + +// Parse behaves like Unmarshal but the caller can pass a set of flags to +// configure the parsing behavior. 
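+//
+// A minimal sketch of the intended use (values are illustrative):
+//
+//	var v map[string]interface{}
+//	rest, err := Parse([]byte(`{"a":1} tail`), &v, ZeroCopy)
+//	// rest holds the unparsed remainder ("tail"); err is nil here because
+//	// Parse, unlike Unmarshal, tolerates trailing bytes after the value.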
+func Parse(b []byte, x interface{}, flags ParseFlags) ([]byte, error) { + t := reflect.TypeOf(x) + p := (*iface)(unsafe.Pointer(&x)).ptr + + d := decoder{flags: flags | internalParseFlags(b)} + + b = skipSpaces(b) + + if t == nil || p == nil || t.Kind() != reflect.Ptr { + _, r, _, err := d.parseValue(b) + r = skipSpaces(r) + if err != nil { + return r, err + } + return r, &InvalidUnmarshalError{Type: t} + } + t = t.Elem() + + cache := cacheLoad() + c, found := cache[typeid(t)] + + if !found { + c = constructCachedCodec(t, cache) + } + + r, err := c.decode(d, b, p) + return skipSpaces(r), err +} + +// Valid is documented at https://golang.org/pkg/encoding/json/#Valid +func Valid(data []byte) bool { + data = skipSpaces(data) + d := decoder{flags: internalParseFlags(data)} + _, data, _, err := d.parseValue(data) + if err != nil { + return false + } + return len(skipSpaces(data)) == 0 +} + +// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder +type Decoder struct { + reader io.Reader + buffer []byte + remain []byte + inputOffset int64 + err error + flags ParseFlags +} + +// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder +func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} } + +// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered +func (dec *Decoder) Buffered() io.Reader { + return bytes.NewReader(dec.remain) +} + +// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode +func (dec *Decoder) Decode(v interface{}) error { + raw, err := dec.readValue() + if err != nil { + return err + } + _, err = Parse(raw, v, dec.flags) + return err +} + +const ( + minBufferSize = 32768 + minReadSize = 4096 +) + +// readValue reads one JSON value from the buffer and returns its raw bytes. It +// is optimized for the "one JSON value per line" case. +func (dec *Decoder) readValue() (v []byte, err error) { + var n int + var r []byte + d := decoder{flags: dec.flags} + + for { + if len(dec.remain) != 0 { + v, r, _, err = d.parseValue(dec.remain) + if err == nil { + dec.remain, n = skipSpacesN(r) + dec.inputOffset += int64(len(v) + n) + return + } + if len(r) != 0 { + // Parsing of the next JSON value stopped at a position other + // than the end of the input buffer, which indicaates that a + // syntax error was encountered. 
+				return
+			}
+		}
+
+		if err = dec.err; err != nil {
+			if len(dec.remain) != 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return
+		}
+
+		if dec.buffer == nil {
+			dec.buffer = make([]byte, 0, minBufferSize)
+		} else {
+			dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)]
+			dec.remain = nil
+		}
+
+		if (cap(dec.buffer) - len(dec.buffer)) < minReadSize {
+			buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer))
+			copy(buf, dec.buffer)
+			dec.buffer = buf
+		}
+
+		n, err = io.ReadFull(dec.reader, dec.buffer[len(dec.buffer):cap(dec.buffer)])
+		if n > 0 {
+			dec.buffer = dec.buffer[:len(dec.buffer)+n]
+			if err != nil {
+				err = nil
+			}
+		} else if err == io.ErrUnexpectedEOF {
+			err = io.EOF
+		}
+		dec.remain, n = skipSpacesN(dec.buffer)
+		d.flags = dec.flags | internalParseFlags(dec.remain)
+		dec.inputOffset += int64(n)
+		dec.err = err
+	}
+}
+
+// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields
+func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields }
+
+// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber
+func (dec *Decoder) UseNumber() { dec.flags |= UseNumber }
+
+// DontCopyString is an extension to the standard encoding/json package
+// which instructs the decoder to not copy strings loaded from the json
+// payloads when possible.
+func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString }
+
+// DontCopyNumber is an extension to the standard encoding/json package
+// which instructs the decoder to not copy numbers loaded from the json
+// payloads.
+func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber }
+
+// DontCopyRawMessage is an extension to the standard encoding/json package
+// which instructs the decoder to not allocate RawMessage values in separate
+// memory buffers (see the documentation of the DontCopyRawMessage flag for
+// more details).
+func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage }
+
+// DontMatchCaseInsensitiveStructFields is an extension to the standard
+// encoding/json package which instructs the decoder to not match object fields
+// against struct fields in a case-insensitive way; the field names have to
+// match exactly to be decoded into the struct field values.
+func (dec *Decoder) DontMatchCaseInsensitiveStructFields() {
+	dec.flags |= DontMatchCaseInsensitiveStructFields
+}
+
+// ZeroCopy is an extension to the standard encoding/json package which enables
+// all the copy optimizations of the decoder.
+func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy }
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token
+// and the beginning of the next token.
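+//
+// For example (illustrative), when decoding the stream `{"a":1}{"b":2}`,
+// InputOffset reports 7 after the first Decode call.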
+func (dec *Decoder) InputOffset() int64 { + return dec.inputOffset +} + +// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder +type Encoder struct { + writer io.Writer + prefix string + indent string + buffer *bytes.Buffer + err error + flags AppendFlags +} + +// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys | appendNewline} +} + +// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode +func (enc *Encoder) Encode(v interface{}) error { + if enc.err != nil { + return enc.err + } + + var err error + var buf = encoderBufferPool.Get().(*encoderBuffer) + + buf.data, err = Append(buf.data[:0], v, enc.flags) + + if err != nil { + encoderBufferPool.Put(buf) + return err + } + + if (enc.flags & appendNewline) != 0 { + buf.data = append(buf.data, '\n') + } + b := buf.data + + if enc.prefix != "" || enc.indent != "" { + if enc.buffer == nil { + enc.buffer = new(bytes.Buffer) + enc.buffer.Grow(2 * len(buf.data)) + } else { + enc.buffer.Reset() + } + Indent(enc.buffer, buf.data, enc.prefix, enc.indent) + b = enc.buffer.Bytes() + } + + if _, err := enc.writer.Write(b); err != nil { + enc.err = err + } + + encoderBufferPool.Put(buf) + return err +} + +// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML +func (enc *Encoder) SetEscapeHTML(on bool) { + if on { + enc.flags |= EscapeHTML + } else { + enc.flags &= ^EscapeHTML + } +} + +// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent +func (enc *Encoder) SetIndent(prefix, indent string) { + enc.prefix = prefix + enc.indent = indent +} + +// SetSortMapKeys is an extension to the standard encoding/json package which +// allows the program to toggle sorting of map keys on and off. +func (enc *Encoder) SetSortMapKeys(on bool) { + if on { + enc.flags |= SortMapKeys + } else { + enc.flags &= ^SortMapKeys + } +} + +// SetTrustRawMessage skips value checking when encoding a raw json message. It should only +// be used if the values are known to be valid json, e.g. because they were originally created +// by json.Unmarshal. +func (enc *Encoder) SetTrustRawMessage(on bool) { + if on { + enc.flags |= TrustRawMessage + } else { + enc.flags &= ^TrustRawMessage + } +} + +// SetAppendNewline is an extension to the standard encoding/json package which +// allows the program to toggle the addition of a newline in Encode on or off. +func (enc *Encoder) SetAppendNewline(on bool) { + if on { + enc.flags |= appendNewline + } else { + enc.flags &= ^appendNewline + } +} + +var encoderBufferPool = sync.Pool{ + New: func() interface{} { return &encoderBuffer{data: make([]byte, 0, 4096)} }, +} + +type encoderBuffer struct{ data []byte } diff --git a/vendor/github.com/segmentio/encoding/json/parse.go b/vendor/github.com/segmentio/encoding/json/parse.go new file mode 100644 index 00000000000..3e656217ec7 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/parse.go @@ -0,0 +1,787 @@ +package json + +import ( + "bytes" + "encoding/binary" + "math" + "math/bits" + "reflect" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "github.com/segmentio/encoding/ascii" +) + +// All spaces characters defined in the json specification. 
+const ( + sp = ' ' + ht = '\t' + nl = '\n' + cr = '\r' +) + +const ( + escape = '\\' + quote = '"' +) + +func internalParseFlags(b []byte) (flags ParseFlags) { + // Don't consider surrounding whitespace + b = skipSpaces(b) + b = trimTrailingSpaces(b) + if ascii.ValidPrint(b) { + flags |= validAsciiPrint + } + if bytes.IndexByte(b, '\\') == -1 { + flags |= noBackslash + } + return +} + +func skipSpaces(b []byte) []byte { + if len(b) > 0 && b[0] <= 0x20 { + b, _ = skipSpacesN(b) + } + return b +} + +func skipSpacesN(b []byte) ([]byte, int) { + for i := range b { + switch b[i] { + case sp, ht, nl, cr: + default: + return b[i:], i + } + } + return nil, 0 +} + +func trimTrailingSpaces(b []byte) []byte { + if len(b) > 0 && b[len(b)-1] <= 0x20 { + b = trimTrailingSpacesN(b) + } + return b +} + +func trimTrailingSpacesN(b []byte) []byte { + i := len(b) - 1 +loop: + for ; i >= 0; i-- { + switch b[i] { + case sp, ht, nl, cr: + default: + break loop + } + } + return b[:i+1] +} + +// parseInt parses a decimal representation of an int64 from b. +// +// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseInt). +// +// Because it only works with base 10 the function is also significantly faster +// than strconv.ParseInt. +func (d decoder) parseInt(b []byte, t reflect.Type) (int64, []byte, error) { + var value int64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode integer from an empty input") + } + + if b[0] == '-' { + const max = math.MinInt64 + const lim = max / 10 + + if len(b) == 1 { + return 0, b, syntaxError(b, "cannot decode integer from '-'") + } + + if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for _, c := range b[1:] { + if !(c >= '0' && c <= '9') { + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + break + } + + if value < lim { + return 0, b, unmarshalOverflow(b, t) + } + + value *= 10 + x := int64(c - '0') + + if value < (max + x) { + return 0, b, unmarshalOverflow(b, t) + } + + value -= x + count++ + } + + count++ + } else { + if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ { + x := int64(b[count] - '0') + next := value*10 + x + if next < value { + return 0, b, unmarshalOverflow(b, t) + } + value = next + } + + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + } + + if count < len(b) { + switch b[count] { + case '.', 'e', 'E': // was this actually a float? + v, r, _, err := d.parseNumber(b) + if err != nil { + v, r = b[:count+1], b[count+1:] + } + return 0, r, unmarshalTypeError(v, t) + } + } + + return value, b[count:], nil +} + +// parseUint is like parseInt but for unsigned integers. 
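+// It rejects a leading '0' followed by another digit and reports overflow
+// against the destination type t.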
+func (d decoder) parseUint(b []byte, t reflect.Type) (uint64, []byte, error) { + var value uint64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode integer value from an empty input") + } + + if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { + return 0, b, syntaxError(b, "invalid leading character '0' in integer") + } + + for ; count < len(b) && b[count] >= '0' && b[count] <= '9'; count++ { + x := uint64(b[count] - '0') + next := value*10 + x + if next < value { + return 0, b, unmarshalOverflow(b, t) + } + value = next + } + + if count == 0 { + b, err := d.inputError(b, t) + return 0, b, err + } + + if count < len(b) { + switch b[count] { + case '.', 'e', 'E': // was this actually a float? + v, r, _, err := d.parseNumber(b) + if err != nil { + v, r = b[:count+1], b[count+1:] + } + return 0, r, unmarshalTypeError(v, t) + } + } + + return value, b[count:], nil +} + +// parseUintHex parses a hexadecimanl representation of a uint64 from b. +// +// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but +// it prevents Go from making a memory allocation for converting a byte slice to +// a string (escape analysis fails due to the error returned by strconv.ParseUint). +// +// Because it only works with base 16 the function is also significantly faster +// than strconv.ParseUint. +func (d decoder) parseUintHex(b []byte) (uint64, []byte, error) { + const max = math.MaxUint64 + const lim = max / 0x10 + + var value uint64 + var count int + + if len(b) == 0 { + return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input") + } + +parseLoop: + for i, c := range b { + var x uint64 + + switch { + case c >= '0' && c <= '9': + x = uint64(c - '0') + + case c >= 'A' && c <= 'F': + x = uint64(c-'A') + 0xA + + case c >= 'a' && c <= 'f': + x = uint64(c-'a') + 0xA + + default: + if i == 0 { + return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", c) + } + break parseLoop + } + + if value > lim { + return 0, b, syntaxError(b, "hexadecimal value out of range") + } + + if value *= 0x10; value > (max - x) { + return 0, b, syntaxError(b, "hexadecimal value out of range") + } + + value += x + count++ + } + + return value, b[count:], nil +} + +func (d decoder) parseNull(b []byte) ([]byte, []byte, Kind, error) { + if hasNullPrefix(b) { + return b[:4], b[4:], Null, nil + } + if len(b) < 4 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'null' but found invalid token") +} + +func (d decoder) parseTrue(b []byte) ([]byte, []byte, Kind, error) { + if hasTruePrefix(b) { + return b[:4], b[4:], True, nil + } + if len(b) < 4 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'true' but found invalid token") +} + +func (d decoder) parseFalse(b []byte) ([]byte, []byte, Kind, error) { + if hasFalsePrefix(b) { + return b[:5], b[5:], False, nil + } + if len(b) < 5 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + return nil, b, Undefined, syntaxError(b, "expected 'false' but found invalid token") +} + +func (d decoder) parseNumber(b []byte) (v, r []byte, kind Kind, err error) { + if len(b) == 0 { + r, err = b, unexpectedEOF(b) + return + } + + // Assume it's an unsigned integer at first. 
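+	// (The kind is refined below: a leading '-' upgrades it to Int, and a
+	// decimal point or exponent upgrades it to Float.)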
+ kind = Uint + + i := 0 + // sign + if b[i] == '-' { + kind = Int + i++ + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing number value after sign") + return + } + + if b[i] < '0' || b[i] > '9' { + r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i]) + return + } + + // integer part + if b[i] == '0' { + i++ + if i == len(b) || (b[i] != '.' && b[i] != 'e' && b[i] != 'E') { + v, r = b[:i], b[i:] + return + } + if '0' <= b[i] && b[i] <= '9' { + r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character") + return + } + } + + for i < len(b) && '0' <= b[i] && b[i] <= '9' { + i++ + } + + // decimal part + if i < len(b) && b[i] == '.' { + kind = Float + i++ + decimalStart := i + + for i < len(b) { + if c := b[i]; !('0' <= c && c <= '9') { + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + + if i == decimalStart { + r, err = b[i:], syntaxError(b, "expected decimal part after '.'") + return + } + } + + // exponent part + if i < len(b) && (b[i] == 'e' || b[i] == 'E') { + kind = Float + i++ + + if i < len(b) { + if c := b[i]; c == '+' || c == '-' { + i++ + } + } + + if i == len(b) { + r, err = b[i:], syntaxError(b, "missing exponent in number") + return + } + + exponentStart := i + + for i < len(b) { + if c := b[i]; !('0' <= c && c <= '9') { + if i == exponentStart { + err = syntaxError(b, "expected digit but found '%c'", c) + return + } + break + } + i++ + } + } + + v, r = b[:i], b[i:] + return +} + +func (d decoder) parseUnicode(b []byte) (rune, int, error) { + if len(b) < 4 { + return 0, len(b), syntaxError(b, "unicode code point must have at least 4 characters") + } + + u, r, err := d.parseUintHex(b[:4]) + if err != nil { + return 0, 4, syntaxError(b, "parsing unicode code point: %s", err) + } + + if len(r) != 0 { + return 0, 4, syntaxError(b, "invalid unicode code point") + } + + return rune(u), 4, nil +} + +func (d decoder) parseString(b []byte) ([]byte, []byte, Kind, error) { + if len(b) < 2 { + return nil, b[len(b):], Undefined, unexpectedEOF(b) + } + if b[0] != '"' { + return nil, b, Undefined, syntaxError(b, "expected '\"' at the beginning of a string value") + } + + var n int + if len(b) >= 9 { + // This is an optimization for short strings. We read 8/16 bytes, + // and XOR each with 0x22 (") so that these bytes (and only + // these bytes) are now zero. We use the hasless(u,1) trick + // from https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord + // to determine whether any bytes are zero. Finally, we CTZ + // to find the index of that byte. 
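+		// For example, for the 8 input bytes `abc"defg`, the XOR with 0x22
+		// zeroes only the '"' byte, the hasless trick sets only that byte's
+		// most significant bit, and the CTZ recovers its index.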
+ const mask1 = 0x2222222222222222 + const mask2 = 0x0101010101010101 + const mask3 = 0x8080808080808080 + u := binary.LittleEndian.Uint64(b[1:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 2 + goto found + } + if len(b) >= 17 { + u = binary.LittleEndian.Uint64(b[9:]) ^ mask1 + if mask := (u - mask2) & ^u & mask3; mask != 0 { + n = bits.TrailingZeros64(mask)/8 + 10 + goto found + } + } + } + n = bytes.IndexByte(b[1:], '"') + 2 + if n <= 1 { + return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") + } +found: + if (d.flags.has(noBackslash) || bytes.IndexByte(b[1:n], '\\') < 0) && + (d.flags.has(validAsciiPrint) || ascii.ValidPrint(b[1:n])) { + return b[:n], b[n:], Unescaped, nil + } + + for i := 1; i < len(b); i++ { + switch b[i] { + case '\\': + if i++; i < len(b) { + switch b[i] { + case '"', '\\', '/', 'n', 'r', 't', 'f', 'b': + case 'u': + _, n, err := d.parseUnicode(b[i+1:]) + if err != nil { + return nil, b[i+1+n:], Undefined, err + } + i += n + default: + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + + case '"': + return b[:i+1], b[i+1:], String, nil + + default: + if b[i] < 0x20 { + return nil, b, Undefined, syntaxError(b, "invalid character '%c' in string escape code", b[i]) + } + } + } + + return nil, b[len(b):], Undefined, syntaxError(b, "missing '\"' at the end of a string value") +} + +func (d decoder) parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) { + s, b, k, err := d.parseString(b) + if err != nil { + return s, b, false, err + } + + s = s[1 : len(s)-1] // trim the quotes + + if k == Unescaped { + return s, b, false, nil + } + + if r == nil { + r = make([]byte, 0, len(s)) + } + + for len(s) != 0 { + i := bytes.IndexByte(s, '\\') + + if i < 0 { + r = appendCoerceInvalidUTF8(r, s) + break + } + + r = appendCoerceInvalidUTF8(r, s[:i]) + s = s[i+1:] + + c := s[0] + switch c { + case '"', '\\', '/': + // simple escaped character + case 'n': + c = '\n' + + case 'r': + c = '\r' + + case 't': + c = '\t' + + case 'b': + c = '\b' + + case 'f': + c = '\f' + + case 'u': + s = s[1:] + + r1, n1, err := d.parseUnicode(s) + if err != nil { + return r, b, true, err + } + s = s[n1:] + + if utf16.IsSurrogate(r1) { + if !hasPrefix(s, `\u`) { + r1 = unicode.ReplacementChar + } else { + r2, n2, err := d.parseUnicode(s[2:]) + if err != nil { + return r, b, true, err + } + if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar { + s = s[2+n2:] + } + } + } + + r = appendRune(r, r1) + continue + + default: // not sure what this escape sequence is + return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c) + } + + r = append(r, c) + s = s[1:] + } + + return r, b, true, nil +} + +func appendRune(b []byte, r rune) []byte { + n := len(b) + b = append(b, 0, 0, 0, 0) + return b[:n+utf8.EncodeRune(b[n:], r)] +} + +func appendCoerceInvalidUTF8(b []byte, s []byte) []byte { + c := [4]byte{} + + for _, r := range string(s) { + b = append(b, c[:utf8.EncodeRune(c[:], r)]...) 
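+		// (ranging over string(s) yields utf8.RuneError for invalid
+		// sequences, so malformed input is coerced to U+FFFD here)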
+	}
+
+	return b
+}
+
+func (d decoder) parseObject(b []byte) ([]byte, []byte, Kind, error) {
+	if len(b) < 2 {
+		return nil, b[len(b):], Undefined, unexpectedEOF(b)
+	}
+
+	if b[0] != '{' {
+		return nil, b, Undefined, syntaxError(b, "expected '{' at the beginning of an object value")
+	}
+
+	var err error
+	var a = b
+	var n = len(b)
+	var i = 0
+
+	b = b[1:]
+	for {
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return nil, b, Undefined, syntaxError(b, "cannot decode object from empty input")
+		}
+
+		if b[0] == '}' {
+			j := (n - len(b)) + 1
+			return a[:j], a[j:], Object, nil
+		}
+
+		if i != 0 {
+			if len(b) == 0 {
+				return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field value")
+			}
+			if b[0] != ',' {
+				return nil, b, Undefined, syntaxError(b, "expected ',' after object field value but found '%c'", b[0])
+			}
+			b = skipSpaces(b[1:])
+			if len(b) == 0 {
+				return nil, b, Undefined, unexpectedEOF(b)
+			}
+			if b[0] == '}' {
+				return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after object field")
+			}
+		}
+
+		_, b, _, err = d.parseString(b)
+		if err != nil {
+			return nil, b, Undefined, err
+		}
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return nil, b, Undefined, syntaxError(b, "unexpected EOF after object field key")
+		}
+		if b[0] != ':' {
+			return nil, b, Undefined, syntaxError(b, "expected ':' after object field key but found '%c'", b[0])
+		}
+		b = skipSpaces(b[1:])
+
+		_, b, _, err = d.parseValue(b)
+		if err != nil {
+			return nil, b, Undefined, err
+		}
+
+		i++
+	}
+}
+
+func (d decoder) parseArray(b []byte) ([]byte, []byte, Kind, error) {
+	if len(b) < 2 {
+		return nil, b[len(b):], Undefined, unexpectedEOF(b)
+	}
+
+	if b[0] != '[' {
+		return nil, b, Undefined, syntaxError(b, "expected '[' at the beginning of array value")
+	}
+
+	var err error
+	var a = b
+	var n = len(b)
+	var i = 0
+
+	b = b[1:]
+	for {
+		b = skipSpaces(b)
+
+		if len(b) == 0 {
+			return nil, b, Undefined, syntaxError(b, "missing closing ']' after array value")
+		}
+
+		if b[0] == ']' {
+			j := (n - len(b)) + 1
+			return a[:j], a[j:], Array, nil
+		}
+
+		if i != 0 {
+			if len(b) == 0 {
+				return nil, b, Undefined, syntaxError(b, "unexpected EOF after array element")
+			}
+			if b[0] != ',' {
+				return nil, b, Undefined, syntaxError(b, "expected ',' after array element but found '%c'", b[0])
+			}
+			b = skipSpaces(b[1:])
+			if len(b) == 0 {
+				return nil, b, Undefined, unexpectedEOF(b)
+			}
+			if b[0] == ']' {
+				return nil, b, Undefined, syntaxError(b, "unexpected trailing comma after array element")
+			}
+		}
+
+		_, b, _, err = d.parseValue(b)
+		if err != nil {
+			return nil, b, Undefined, err
+		}
+
+		i++
+	}
+}
+
+func (d decoder) parseValue(b []byte) ([]byte, []byte, Kind, error) {
+	if len(b) == 0 {
+		return nil, b, Undefined, syntaxError(b, "unexpected end of JSON input")
+	}
+
+	var v []byte
+	var k Kind
+	var err error
+
+	switch b[0] {
+	case '{':
+		v, b, k, err = d.parseObject(b)
+	case '[':
+		v, b, k, err = d.parseArray(b)
+	case '"':
+		v, b, k, err = d.parseString(b)
+	case 'n':
+		v, b, k, err = d.parseNull(b)
+	case 't':
+		v, b, k, err = d.parseTrue(b)
+	case 'f':
+		v, b, k, err = d.parseFalse(b)
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		v, b, k, err = d.parseNumber(b)
+	default:
+		err = syntaxError(b, "invalid character '%c' looking for beginning of value", b[0])
+	}
+
+	return v, b, k, err
+}
+
+func hasNullPrefix(b []byte) bool {
+	return len(b) >= 4 && string(b[:4]) == "null"
+}
+
+func hasTruePrefix(b []byte) bool {
+	return len(b) >= 4 && string(b[:4]) == "true"
+} + +func hasFalsePrefix(b []byte) bool { + return len(b) >= 5 && string(b[:5]) == "false" +} + +func hasPrefix(b []byte, s string) bool { + return len(b) >= len(s) && s == string(b[:len(s)]) +} + +func hasLeadingSign(b []byte) bool { + return len(b) > 0 && (b[0] == '+' || b[0] == '-') +} + +func hasLeadingZeroes(b []byte) bool { + if hasLeadingSign(b) { + b = b[1:] + } + return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' +} + +func appendToLower(b, s []byte) []byte { + if ascii.Valid(s) { // fast path for ascii strings + i := 0 + + for j := range s { + c := s[j] + + if 'A' <= c && c <= 'Z' { + b = append(b, s[i:j]...) + b = append(b, c+('a'-'A')) + i = j + 1 + } + } + + return append(b, s[i:]...) + } + + for _, r := range string(s) { + b = appendRune(b, foldRune(r)) + } + + return b +} + +func foldRune(r rune) rune { + if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' { + r = r + ('a' - 'A') + } + return r +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect.go b/vendor/github.com/segmentio/encoding/json/reflect.go new file mode 100644 index 00000000000..3a5c6f12179 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect.go @@ -0,0 +1,20 @@ +//go:build go1.18 +// +build go1.18 + +package json + +import ( + "reflect" + "unsafe" +) + +func extendSlice(t reflect.Type, s *slice, n int) slice { + arrayType := reflect.ArrayOf(n, t.Elem()) + arrayData := reflect.New(arrayType) + reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem()) + return slice{ + data: unsafe.Pointer(arrayData.Pointer()), + len: s.len, + cap: n, + } +} diff --git a/vendor/github.com/segmentio/encoding/json/reflect_optimize.go b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go new file mode 100644 index 00000000000..6936cefe24d --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go @@ -0,0 +1,30 @@ +//go:build !go1.18 +// +build !go1.18 + +package json + +import ( + "reflect" + "unsafe" +) + +//go:linkname unsafe_NewArray reflect.unsafe_NewArray +func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer + +//go:linkname typedslicecopy reflect.typedslicecopy +//go:noescape +func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int + +func extendSlice(t reflect.Type, s *slice, n int) slice { + elemTypeRef := t.Elem() + elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr + + d := slice{ + data: unsafe_NewArray(elemTypePtr, n), + len: s.len, + cap: n, + } + + typedslicecopy(elemTypePtr, d, *s) + return d +} diff --git a/vendor/github.com/segmentio/encoding/json/string.go b/vendor/github.com/segmentio/encoding/json/string.go new file mode 100644 index 00000000000..dba5c5d3618 --- /dev/null +++ b/vendor/github.com/segmentio/encoding/json/string.go @@ -0,0 +1,70 @@ +package json + +import ( + "math/bits" + "unsafe" +) + +const ( + lsb = 0x0101010101010101 + msb = 0x8080808080808080 +) + +// escapeIndex finds the index of the first char in `s` that requires escaping. +// A char requires escaping if it's outside of the range of [0x20, 0x7F] or if +// it includes a double quote or backslash. If the escapeHTML mode is enabled, +// the chars <, > and & also require escaping. If no chars in `s` require +// escaping, the return value is -1. +func escapeIndex(s string, escapeHTML bool) int { + chunks := stringToUint64(s) + for _, n := range chunks { + // combine masks before checking for the MSB of each byte. 
We include
+		// `n` in the mask to check whether any of the *input* byte MSBs were
+		// set (i.e. the byte was outside the ASCII range).
+		mask := n | below(n, 0x20) | contains(n, '"') | contains(n, '\\')
+		if escapeHTML {
+			mask |= contains(n, '<') | contains(n, '>') | contains(n, '&')
+		}
+		if (mask & msb) != 0 {
+			return bits.TrailingZeros64(mask&msb) / 8
+		}
+	}
+
+	for i := len(chunks) * 8; i < len(s); i++ {
+		c := s[i]
+		if c < 0x20 || c > 0x7f || c == '"' || c == '\\' || (escapeHTML && (c == '<' || c == '>' || c == '&')) {
+			return i
+		}
+	}
+
+	return -1
+}
+
+// below returns a mask that can be used to determine if any of the bytes
+// in `n` are below `b`. If a byte's MSB is set in the mask then that byte was
+// below `b`. The result is only valid if `b`, and each byte in `n`, is below
+// 0x80.
+func below(n uint64, b byte) uint64 {
+	return n - expand(b)
+}
+
+// contains returns a mask that can be used to determine if any of the
+// bytes in `n` are equal to `b`. If a byte's MSB is set in the mask then
+// that byte is equal to `b`. The result is only valid if `b`, and each
+// byte in `n`, is below 0x80.
+func contains(n uint64, b byte) uint64 {
+	return (n ^ expand(b)) - lsb
+}
+
+// expand puts the specified byte into each of the 8 bytes of a uint64.
+func expand(b byte) uint64 {
+	return lsb * uint64(b)
+}
+
+func stringToUint64(s string) []uint64 {
+	return *(*[]uint64)(unsafe.Pointer(&sliceHeader{
+		Data: *(*unsafe.Pointer)(unsafe.Pointer(&s)),
+		Len: len(s) / 8,
+		Cap: len(s) / 8,
+	}))
+}
diff --git a/vendor/github.com/segmentio/encoding/json/token.go b/vendor/github.com/segmentio/encoding/json/token.go
new file mode 100644
index 00000000000..652e36d76fa
--- /dev/null
+++ b/vendor/github.com/segmentio/encoding/json/token.go
@@ -0,0 +1,416 @@
+package json
+
+import (
+	"strconv"
+	"sync"
+	"unsafe"
+)
+
+// Tokenizer is an iterator-style type which can be used to progressively parse
+// through a json input.
+//
+// Tokenizing json is useful to build highly efficient parsing operations, for
+// example when doing transformations on-the-fly whereas the program reads the
+// input and produces the transformed json to an output buffer.
+//
+// Here is a common pattern to use a tokenizer:
+//
+//	for t := json.NewTokenizer(b); t.Next(); {
+//		switch k := t.Kind(); k.Class() {
+//		case json.Null:
+//			...
+//		case json.Bool:
+//			...
+//		case json.Num:
+//			...
+//		case json.String:
+//			...
+//		case json.Array:
+//			...
+//		case json.Object:
+//			...
+//		}
+//	}
+//
+type Tokenizer struct {
+	// When the tokenizer is positioned on a json delimiter this field is not
+	// zero. In this case the possible values are '{', '}', '[', ']', ':', and
+	// ','.
+	Delim Delim
+
+	// This field contains the raw json token that the tokenizer is pointing at.
+	// When Delim is not zero, this field is a single-element byte slice
+	// containing the delimiter value. Otherwise, this field holds values like
+	// null, true, false, numbers, or quoted strings.
+	Value RawValue
+
+	// When the tokenizer has encountered invalid content this field is not nil.
+	Err error
+
+	// When the value is in an array or an object, this field contains the depth
+	// at which it was found.
+	Depth int
+
+	// When the value is in an array or an object, this field contains the
+	// position at which it was found.
+	Index int
+
+	// This field is true when the value is the key of an object.
+	IsKey bool
+
+	// Tells whether the next value read from the tokenizer is a key.
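+	//
+	// It mirrors IsKey one token ahead: Next sets it to true after reading
+	// '{' or a ',' inside an object, and back to false after reading ':'.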
+	isKey bool
+
+	// json input for the tokenizer, pointing at data right after the last token
+	// that was parsed.
+	json []byte
+
+	// Stack used to track entering and leaving arrays, objects, and keys.
+	stack *stack
+
+	// Decoder used for parsing.
+	decoder
+}
+
+// NewTokenizer constructs a new Tokenizer which reads its json input from b.
+func NewTokenizer(b []byte) *Tokenizer {
+	return &Tokenizer{
+		json: b,
+		decoder: decoder{flags: internalParseFlags(b)},
+	}
+}
+
+// Reset erases the state of t and re-initializes it with the json input from b.
+func (t *Tokenizer) Reset(b []byte) {
+	if t.stack != nil {
+		releaseStack(t.stack)
+	}
+	// This code is similar to:
+	//
+	//	*t = Tokenizer{json: b}
+	//
+	// However, it does not compile down to an invocation of duff-copy.
+	t.Delim = 0
+	t.Value = nil
+	t.Err = nil
+	t.Depth = 0
+	t.Index = 0
+	t.IsKey = false
+	t.isKey = false
+	t.json = b
+	t.stack = nil
+	t.decoder = decoder{flags: internalParseFlags(b)}
+}
+
+// Next advances the tokenizer to the next token, returning false when the end
+// of the json input has been reached.
+//
+// If the tokenizer encounters malformed json while reading the input the method
+// sets t.Err to an error describing the issue, and returns false. Once an error
+// has been encountered, the tokenizer will always fail until its input is
+// cleared by a call to its Reset method.
+func (t *Tokenizer) Next() bool {
+	if t.Err != nil {
+		return false
+	}
+
+	// Inlined code of the skipSpaces function, this gives a ~15% speed boost.
+	i := 0
+skipLoop:
+	for _, c := range t.json {
+		switch c {
+		case sp, ht, nl, cr:
+			i++
+		default:
+			break skipLoop
+		}
+	}
+
+	if i > 0 {
+		t.json = t.json[i:]
+	}
+
+	if len(t.json) == 0 {
+		t.Reset(nil)
+		return false
+	}
+
+	var kind Kind
+	switch t.json[0] {
+	case '"':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseString(t.json)
+	case 'n':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseNull(t.json)
+	case 't':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseTrue(t.json)
+	case 'f':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseFalse(t.json)
+	case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		t.Delim = 0
+		t.Value, t.json, kind, t.Err = t.parseNumber(t.json)
+	case '{', '}', '[', ']', ':', ',':
+		t.Delim, t.Value, t.json = Delim(t.json[0]), t.json[:1], t.json[1:]
+		switch t.Delim {
+		case '{':
+			kind = Object
+		case '[':
+			kind = Array
+		}
+	default:
+		t.Delim = 0
+		t.Value, t.json, t.Err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0])
+	}
+
+	t.Depth = t.depth()
+	t.Index = t.index()
+	t.flags = t.flags.withKind(kind)
+
+	if t.Delim == 0 {
+		t.IsKey = t.isKey
+	} else {
+		t.IsKey = false
+
+		switch t.Delim {
+		case '{':
+			t.isKey = true
+			t.push(inObject)
+		case '[':
+			t.push(inArray)
+		case '}':
+			t.Err = t.pop(inObject)
+			t.Depth--
+			t.Index = t.index()
+		case ']':
+			t.Err = t.pop(inArray)
+			t.Depth--
+			t.Index = t.index()
+		case ':':
+			t.isKey = false
+		case ',':
+			if t.stack == nil || len(t.stack.state) == 0 {
+				t.Err = syntaxError(t.json, "found unexpected comma")
+				return false
+			}
+			if t.stack.is(inObject) {
+				t.isKey = true
+			}
+			t.stack.state[len(t.stack.state)-1].len++
+		}
+	}
+
+	return (t.Delim != 0 || len(t.Value) != 0) && t.Err == nil
+}
+
+func (t *Tokenizer) depth() int {
+	if t.stack == nil {
+		return 0
+	}
+	return t.stack.depth()
+}
+
+func (t *Tokenizer) index() int {
+	if t.stack == nil {
+		return 0
+	}
+	return t.stack.index()
+}
+
+func (t *Tokenizer) push(typ scope) {
+	if t.stack == nil {
+		t.stack = acquireStack()
+	}
+	t.stack.push(typ)
+}
+
+func (t *Tokenizer) pop(expect scope) error {
+	if t.stack == nil || !t.stack.pop(expect) {
+		return syntaxError(t.json, "found unexpected character while tokenizing json input")
+	}
+	return nil
+}
+
+// Kind returns the kind of the value that the tokenizer is currently positioned
+// on.
+func (t *Tokenizer) Kind() Kind { return t.flags.kind() }
+
+// Bool returns a bool containing the value of the json boolean that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a boolean, the behavior is undefined.
+func (t *Tokenizer) Bool() bool { return t.flags.kind() == True }
+
+// Int returns an int64 containing the value of the json number that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on an integer, the behavior is undefined.
+func (t *Tokenizer) Int() int64 {
+	i, _, _ := t.parseInt(t.Value, int64Type)
+	return i
+}
+
+// Uint returns a uint64 containing the value of the json number that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a positive integer, the behavior is
+// undefined.
+func (t *Tokenizer) Uint() uint64 {
+	u, _, _ := t.parseUint(t.Value, uint64Type)
+	return u
+}
+
+// Float returns a float64 containing the value of the json number that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// If the tokenizer is not positioned on a number, the behavior is undefined.
+func (t *Tokenizer) Float() float64 {
+	f, _ := strconv.ParseFloat(*(*string)(unsafe.Pointer(&t.Value)), 64)
+	return f
+}
+
+// String returns a byte slice containing the value of the json string that the
+// tokenizer is currently pointing at.
+//
+// This method must only be called after checking the kind of the token via a
+// call to Kind.
+//
+// When possible, the returned byte slice references the backing array of the
+// tokenizer. A new slice is only allocated if the tokenizer needed to unescape
+// the json string.
+//
+// If the tokenizer is not positioned on a string, the behavior is undefined.
+func (t *Tokenizer) String() []byte {
+	if t.flags.kind() == Unescaped && len(t.Value) > 1 {
+		return t.Value[1 : len(t.Value)-1] // unquote
+	}
+	s, _, _, _ := t.parseStringUnquote(t.Value, nil)
+	return s
+}
+
+// RawValue represents a raw json value; it is intended to carry null, true,
+// false, number, and string values only.
+type RawValue []byte
+
+// String returns true if v contains a string value.
+func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' }
+
+// Null returns true if v contains a null value.
+func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' }
+
+// True returns true if v contains a true value.
+func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' }
+
+// False returns true if v contains a false value.
+func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' }
+
+// Number returns true if v contains a number value.
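+//
+// A json number can only start with a digit or a '-' sign, which is exactly
+// what the switch below checks for.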
+func (v RawValue) Number() bool { + if len(v) != 0 { + switch v[0] { + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + } + } + return false +} + +// AppendUnquote writes the unquoted version of the string value in v into b. +func (v RawValue) AppendUnquote(b []byte) []byte { + d := decoder{} + s, r, _, err := d.parseStringUnquote(v, b) + if err != nil { + panic(err) + } + if len(r) != 0 { + panic(syntaxError(r, "unexpected trailing tokens after json value")) + } + return append(b, s...) +} + +// Unquote returns the unquoted version of the string value in v. +func (v RawValue) Unquote() []byte { + return v.AppendUnquote(nil) +} + +type scope int + +const ( + inArray scope = iota + inObject +) + +type state struct { + typ scope + len int +} + +type stack struct { + state []state +} + +func (s *stack) push(typ scope) { + s.state = append(s.state, state{typ: typ, len: 1}) +} + +func (s *stack) pop(expect scope) bool { + i := len(s.state) - 1 + + if i < 0 { + return false + } + + if found := s.state[i]; expect != found.typ { + return false + } + + s.state = s.state[:i] + return true +} + +func (s *stack) is(typ scope) bool { + return len(s.state) != 0 && s.state[len(s.state)-1].typ == typ +} + +func (s *stack) depth() int { + return len(s.state) +} + +func (s *stack) index() int { + if len(s.state) == 0 { + return 0 + } + return s.state[len(s.state)-1].len - 1 +} + +func acquireStack() *stack { + s, _ := stackPool.Get().(*stack) + if s == nil { + s = &stack{state: make([]state, 0, 4)} + } else { + s.state = s.state[:0] + } + return s +} + +func releaseStack(s *stack) { + stackPool.Put(s) +} + +var ( + stackPool sync.Pool // *stack +) diff --git a/vendor/go.lsp.dev/jsonrpc2/.codecov.yml b/vendor/go.lsp.dev/jsonrpc2/.codecov.yml new file mode 100644 index 00000000000..95e428957d4 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.codecov.yml @@ -0,0 +1,38 @@ +codecov: + allow_coverage_offsets: true + notify: + wait_for_ci: false + +coverage: + precision: 1 + round: down + range: "70...100" + + status: + project: + default: + target: auto + threshold: 1% + if_ci_failed: error + if_not_found: success + patch: + default: + only_pulls: true + target: 50% + threshold: 10% + if_ci_failed: error + if_not_found: failure + changes: + default: + if_ci_failed: error + if_not_found: success + only_pulls: false + branches: + - main + +comment: + behavior: default + show_carryforward_flags: true + +github_checks: + annotations: true diff --git a/vendor/go.lsp.dev/jsonrpc2/.errcheckignore b/vendor/go.lsp.dev/jsonrpc2/.errcheckignore new file mode 100644 index 00000000000..7fda6d3a3eb --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.errcheckignore @@ -0,0 +1 @@ +(*go.lsp.dev/jsonrpc2.Request).Reply diff --git a/vendor/go.lsp.dev/jsonrpc2/.gitattributes b/vendor/go.lsp.dev/jsonrpc2/.gitattributes new file mode 100644 index 00000000000..169fc0a41f7 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.gitattributes @@ -0,0 +1,11 @@ +# go.lsp.dev/jsonrpc2 project gitattributes file +# https://github.com/github/linguist#using-gitattributes +# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml + +# To prevent CRLF breakages on Windows for fragile files, like testdata. 
+* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/jsonrpc2/.gitignore b/vendor/go.lsp.dev/jsonrpc2/.gitignore new file mode 100644 index 00000000000..6211ff933bd --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.gitignore @@ -0,0 +1,52 @@ +# go.lsp.dev/jsonrpc2 project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* + +# tools +bin/ diff --git a/vendor/go.lsp.dev/jsonrpc2/.golangci.yml b/vendor/go.lsp.dev/jsonrpc2/.golangci.yml new file mode 100644 index 00000000000..c363c1835c2 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/.golangci.yml @@ -0,0 +1,200 @@ +run: + timeout: 5m + issues-exit-code: 1 + tests: true + skip-dirs: [] + skip-dirs-use-default: true + skip-files: [] + allow-parallel-runners: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + uniq-by-line: true + sort-results: true + +linters-settings: + dupl: + threshold: 100 + # errcheck: + # check-type-assertions: true + # check-blank: true + # exclude: .errcheckignore + funlen: + lines: 100 + statements: 60 + gocognit: + min-complexity: 20 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - whyNoLint + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 15 + godot: + scope: declarations + capital: false + gofmt: + simplify: true + goimports: + local-prefixes: go.lsp.dev/jsonrpc2 + golint: + min-confidence: 0.3 + govet: + enable-all: true + check-shadowing: true + disable: + - fieldalignment + depguard: + list-type: blacklist + include-go-root: true + # packages-with-error-message: + # - github.com/sirupsen/logrus: "logging is allowed only by logutils.Log" + lll: + line-length: 120 + tab-width: 1 + maligned: + suggest-new: true + misspell: + locale: US + ignore-words: + - cancelled + nakedret: + max-func-lines: 30 + prealloc: + simple: true + range-loops: true + for-loops: true + testpackage: + skip-regexp: '.*(export)_test\.go' + unparam: + check-exported: true + algo: cha + unused: + check-exported: false + whitespace: + multi-if: true + multi-func: true + +linters: + fast: false + disabled: + - deadcode # Finds unused code + - errcheck # Errcheck is a program for checking for unchecked errors in go programs + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - gci # Gci control golang package import order and make it always 
deterministic + - gochecknoglobals # check that no global variables exist + - gochecknoinits # Checks that no init functions are present in Go code + - godox # Tool for detection of FIXME, TODO and other comment keywords + - goerr113 # Golang linter to check the errors handling expressions + - gofumpt # Gofumpt checks whether code was gofumpt-ed + - goheader # Checks is file header matches to pattern + - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes + - gomnd # An analyzer to detect magic numbers + - gomodguard # Allow and block list linter for direct Go module dependencies + - gosec # Inspects source code for security problems + - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity + - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - scopelint # Scopelint checks for unpinned variables in go programs + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed + - unparam # Reports unused function parameters + - wrapcheck # Checks that errors returned from external packages are wrapped TODO(zchee): enable + - wsl # Whitespace Linter + enable: + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bodyclose # checks whether HTTP response body is closed successfully + - depguard # Go linter that checks if package imports are in a list of acceptable packages + - dogsled # Checks assignments with too many blank identifiers + - dupl # Tool for code clone detection + - errorlint # source code linter for Go software that can be used to find code that will cause problemswith the error wrapping scheme introduced in Go 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - funlen # Tool for detection of long functions + - gocognit # Computes and checks the cognitive complexity of functions + - goconst # Finds repeated strings that could be replaced by a constant + - gocritic # The most opinionated Go source code linter + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - goimports # Goimports does everything that gofmt does. 
Additionally it checks unused imports + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - ineffassign # Detects when assignments to existing variables are not used + - lll # Reports long lines + - makezero # Finds slice declarations with non-zero initial length + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nestif # Reports deeply nested if statements + - noctx # noctx finds sending http request without context.Context + - nolintlint # Reports ill-formed or insufficient nolint directives + - prealloc # Finds slice declarations that could potentially be preallocated + - predeclared # find code that shadows one of Go's predeclared identifiers + - rowserrcheck # checks whether Err of rows is checked successfully + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - structcheck # Finds unused struct fields + - stylecheck # Stylecheck is a replacement for golint + - testpackage # linter that makes you use a separate _test package + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unused # Checks Go code for unused constants, variables, functions and types + - varcheck # Finds unused global variables and constants + - whitespace # Tool for detection of leading and trailing whitespace + +issues: + max-same-issues: 0 + exclude-use-default: true + exclude-rules: + - path: _test\.go + linters: + - errcheck + - funlen + - gocognit + - goconst + - gocyclo + - lll + - maligned + - wrapcheck + - path: "(.*)?_example_test.go" + linters: + - gocritic + # `TestMain` function is no longer required to call `os.Exit` since Go 1.15. + # ref: https://golang.org/doc/go1.15#testing + - text: "SA3000:" + linters: + - staticcheck + # Exclude shadow checking on the variable named err + - text: "shadow: declaration of \"(err|ok)\"" + linters: + - govet + # fake implements + - path: fake/fake.go + linters: + - errcheck + # future use + - path: wire.go + text: "`(codeServerErrorStart|codeServerErrorEnd)` is unused" + # goroutine + - path: handler.go + text: "Error return value of `handler` is not checked" diff --git a/vendor/go.lsp.dev/jsonrpc2/LICENSE b/vendor/go.lsp.dev/jsonrpc2/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/go.lsp.dev/jsonrpc2/Makefile b/vendor/go.lsp.dev/jsonrpc2/Makefile new file mode 100644 index 00000000000..a09304bf4b6 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/Makefile @@ -0,0 +1,129 @@ +# ----------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL := test +comma := , +empty := +space := $(empty) $(empty) + +# ----------------------------------------------------------------------------- +# go + +GO_PATH ?= $(shell go env GOPATH) +GO_OS ?= $(shell go env GOOS) +GO_ARCH ?= $(shell go env GOARCH) + +PKG := $(subst $(GO_PATH)/src/,,$(CURDIR)) +CGO_ENABLED ?= 0 +GO_BUILDTAGS=osusergo netgo static +GO_LDFLAGS=-s -w "-extldflags=-static" +GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo + +GO_PKGS := $(shell go list ./...) +GO_TEST ?= ${TOOLS_BIN}/gotestsum -- +GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...) +GO_TEST_FLAGS ?= -race -count=1 +GO_TEST_FUNC ?= . +GO_COVERAGE_OUT ?= coverage.out +GO_BENCH_FLAGS ?= -benchmem +GO_BENCH_FUNC ?= . +GO_LINT_FLAGS ?= + +TOOLS := $(shell cd tools; go list -f '{{ join .Imports " " }}' -tags=tools) +TOOLS_BIN := ${CURDIR}/tools/bin + +# Set build environment +JOBS := $(shell getconf _NPROCESSORS_CONF) + +# ----------------------------------------------------------------------------- +# defines + +define target +@printf "+ $(patsubst ,$@,$(1))\\n" >&2 +endef + +# ----------------------------------------------------------------------------- +# target + +##@ test, bench, coverage + +export GOTESTSUM_FORMAT=standard-verbose + +.PHONY: test +test: CGO_ENABLED=1 +test: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})' +test: tools/bin/gotestsum ## Runs package test including race condition. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS} + +.PHONY: bench +bench: ## Take a package benchmark. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} -run='^$$' -bench=${GO_BENCH_FUNC} ${GO_BENCH_FLAGS} $(strip ${GO_FLAGS}) ${GO_TEST_PKGS} + +.PHONY: coverage +coverage: CGO_ENABLED=1 +coverage: GO_FLAGS=-tags='$(subst ${space},${comma},${GO_BUILDTAGS})' +coverage: tools/bin/gotestsum ## Takes packages test coverage. 
+ $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=${PKG}/... -coverprofile=${GO_COVERAGE_OUT} $(strip ${GO_FLAGS}) ${GO_PKGS} + + +##@ fmt, lint + +.PHONY: lint +lint: fmt lint/golangci-lint ## Run all linters. + +.PHONY: fmt +fmt: tools/bin/goimportz tools/bin/gofumpt ## Run goimportz and gofumpt. + $(call target) + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /jsonrpc2,,$(PKG)) -w + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w + +.PHONY: lint/golangci-lint +lint/golangci-lint: tools/bin/golangci-lint .golangci.yml ## Run golangci-lint. + $(call target) + ${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./... + + +##@ tools + +.PHONY: tools +tools: tools/bin/'' ## Install tools + +tools/%: tools/bin/% ## install an individual dependent tool + +tools/bin/%: ${CURDIR}/tools/go.mod ${CURDIR}/tools/go.sum + @cd tools; \ + for t in ${TOOLS}; do \ + if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \ + echo "Install $$t ..."; \ + GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -v -mod=mod ${GO_FLAGS} "$${t}"; \ + fi \ + done + + +##@ clean + +.PHONY: clean +clean: ## Cleanups binaries and extra files in the package. + $(call target) + @rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN} + + +##@ miscellaneous + +.PHONY: todo +TODO: ## Print the all of (TODO|BUG|XXX|FIXME|NOTE) in packages. + @grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*') + +.PHONY: env/% +env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc. + @echo $($*) + + +##@ help + +.PHONY: help +help: ## Show this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/go.lsp.dev/jsonrpc2/README.md b/vendor/go.lsp.dev/jsonrpc2/README.md new file mode 100644 index 00000000000..373befc736d --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/README.md @@ -0,0 +1,19 @@ +# jsonrpc2 + +[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga] + +Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go. 
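+
+A minimal sketch of wiring a connection with this package (`rwc` is any
+`io.ReadWriteCloser` you already have; `NewStream` is the stream constructor
+from this package's stream.go, not shown in this hunk):
+
+```go
+conn := jsonrpc2.NewConn(jsonrpc2.NewStream(rwc))
+conn.Go(ctx, jsonrpc2.MethodNotFoundHandler) // serve requests on a goroutine
+<-conn.Done()                                // wait for the connection to shut down
+```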
+ + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/jsonrpc2 +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/jsonrpc2 +[module]: https://github.com/go-language-server/jsonrpc2/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/jsonrpc2 +[ga]: https://github.com/go-language-server/jsonrpc2 + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/jsonrpc2/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev +[module-badge]: https://img.shields.io/github/release/go-language-server/jsonrpc2.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4= +[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/jsonrpc2/main?logo=codecov&style=for-the-badge +[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/jsonrpc2?useReferer&pixel diff --git a/vendor/go.lsp.dev/jsonrpc2/codes.go b/vendor/go.lsp.dev/jsonrpc2/codes.go new file mode 100644 index 00000000000..5da58ea3f64 --- /dev/null +++ b/vendor/go.lsp.dev/jsonrpc2/codes.go @@ -0,0 +1,86 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package jsonrpc2 + +// Code is an error code as defined in the JSON-RPC spec. +type Code int32 + +// list of JSON-RPC error codes. +const ( + // ParseError is the invalid JSON was received by the server. + // An error occurred on the server while parsing the JSON text. + ParseError Code = -32700 + + // InvalidRequest is the JSON sent is not a valid Request object. 
+	InvalidRequest Code = -32600
+
+	// MethodNotFound is the method does not exist / is not available.
+	MethodNotFound Code = -32601
+
+	// InvalidParams is the invalid method parameter(s).
+	InvalidParams Code = -32602
+
+	// InternalError is the internal JSON-RPC error.
+	InternalError Code = -32603
+
+	// JSONRPCReservedErrorRangeStart is the start range of JSON RPC reserved error codes.
+	//
+	// It doesn't denote a real error code. No LSP error codes should
+	// be defined between the start and end range. For backwards
+	// compatibility the "ServerNotInitialized" and the "UnknownErrorCode"
+	// are left in the range.
+	//
+	// @since 3.16.0.
+	JSONRPCReservedErrorRangeStart Code = -32099
+
+	// CodeServerErrorStart reserved for implementation-defined server-errors.
+	//
+	// Deprecated: Use JSONRPCReservedErrorRangeStart instead.
+	CodeServerErrorStart = JSONRPCReservedErrorRangeStart
+
+	// ServerNotInitialized is the error of server not initialized.
+	ServerNotInitialized Code = -32002
+
+	// UnknownError should be used for all non-coded errors.
+	UnknownError Code = -32001
+
+	// JSONRPCReservedErrorRangeEnd is the end range of JSON RPC reserved error codes.
+	//
+	// It doesn't denote a real error code.
+	//
+	// @since 3.16.0.
+	JSONRPCReservedErrorRangeEnd Code = -32000
+
+	// CodeServerErrorEnd reserved for implementation-defined server-errors.
+	//
+	// Deprecated: Use JSONRPCReservedErrorRangeEnd instead.
+	CodeServerErrorEnd = JSONRPCReservedErrorRangeEnd
+)
+
+// This file contains the Go forms of the wire specification.
+//
+// See http://www.jsonrpc.org/specification for details.
+//
+// list of JSON-RPC errors.
+var (
+	// ErrUnknown should be used for all non-coded errors.
+	ErrUnknown = NewError(UnknownError, "JSON-RPC unknown error")
+
+	// ErrParse is used when invalid JSON was received by the server.
+	ErrParse = NewError(ParseError, "JSON-RPC parse error")
+
+	// ErrInvalidRequest is used when the JSON sent is not a valid Request object.
+	ErrInvalidRequest = NewError(InvalidRequest, "JSON-RPC invalid request")
+
+	// ErrMethodNotFound should be returned by the handler when the method does
+	// not exist / is not available.
+	ErrMethodNotFound = NewError(MethodNotFound, "JSON-RPC method not found")
+
+	// ErrInvalidParams should be returned by the handler when method
+	// parameter(s) were invalid.
+	ErrInvalidParams = NewError(InvalidParams, "JSON-RPC invalid params")
+
+	// ErrInternal is not currently returned but defined for completeness.
+	ErrInternal = NewError(InternalError, "JSON-RPC internal error")
+)
diff --git a/vendor/go.lsp.dev/jsonrpc2/conn.go b/vendor/go.lsp.dev/jsonrpc2/conn.go
new file mode 100644
index 00000000000..3b12819babe
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/conn.go
@@ -0,0 +1,245 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Conn is the common interface to jsonrpc clients and servers.
+//
+// Conn is bidirectional; it does not have a designated server or client end.
+// It manages the jsonrpc2 protocol, connecting responses back to their calls.
+type Conn interface {
+	// Call invokes the target method and waits for a response.
+	//
+	// The params will be marshaled to JSON before sending over the wire, and will
+	// be handed to the method invoked.
+	//
+	// The response will be unmarshaled from JSON into the result.
+	//
+	// The id returned will be unique for this connection, and can be used for
+	// logging or tracking.
+	Call(ctx context.Context, method string, params, result interface{}) (ID, error)
+
+	// Notify invokes the target method but does not wait for a response.
+	//
+	// The params will be marshaled to JSON before sending over the wire, and will
+	// be handed to the method invoked.
+	Notify(ctx context.Context, method string, params interface{}) error
+
+	// Go starts a goroutine to handle the connection.
+	//
+	// It must be called exactly once for each Conn. It returns immediately.
+	// Block on Done() to wait for the connection to shut down.
+	//
+	// This is a temporary measure; this should be started automatically in the
+	// future.
+	Go(ctx context.Context, handler Handler)
+
+	// Close closes the connection and its underlying stream.
+	//
+	// It does not wait for the close to complete; use the Done() channel for
+	// that.
+	Close() error
+
+	// Done returns a channel that will be closed when the processing goroutine
+	// has terminated, which will happen if Close() is called or an underlying
+	// stream is closed.
+	Done() <-chan struct{}
+
+	// Err returns an error if there was one from within the processing goroutine.
+	//
+	// If Err returns a non-nil error, the connection is already closed or closing.
+	Err() error
+}
+
+type conn struct {
+	seq int32 // access atomically
+	writeMu sync.Mutex // protects writes to the stream
+	stream Stream // supplied stream
+	pendingMu sync.Mutex // protects the pending map
+	pending map[ID]chan *Response // holds the pending response channel with the ID as the key.
+
+	done chan struct{} // closed when done
+	err atomic.Value // holds run error
+}
+
+// NewConn creates a new connection object around the supplied stream.
+func NewConn(s Stream) Conn {
+	conn := &conn{
+		stream: s,
+		pending: make(map[ID]chan *Response),
+		done: make(chan struct{}),
+	}
+	return conn
+}
+
+// Call implements Conn.
+func (c *conn) Call(ctx context.Context, method string, params, result interface{}) (id ID, err error) {
+	// generate a new request identifier
+	id = NewNumberID(atomic.AddInt32(&c.seq, 1))
+	call, err := NewCall(id, method, params)
+	if err != nil {
+		return id, fmt.Errorf("marshaling call parameters: %w", err)
+	}
+
+	// We have to add ourselves to the pending map before we send, otherwise we
+	// are racing the response. Also add a buffer to rchan, so that if we get a
+	// wire response between the time this call is cancelled and id is deleted
+	// from c.pending, the send to rchan will not block.
+	rchan := make(chan *Response, 1)
+
+	c.pendingMu.Lock()
+	c.pending[id] = rchan
+	c.pendingMu.Unlock()
+
+	defer func() {
+		c.pendingMu.Lock()
+		delete(c.pending, id)
+		c.pendingMu.Unlock()
+	}()
+
+	// now we are ready to send
+	_, err = c.write(ctx, call)
+	if err != nil {
+		// sending failed, we will never get a response, so don't leave it pending
+		return id, err
+	}
+
+	// now wait for the response
+	select {
+	case resp := <-rchan:
+		// is it an error response?
+		if resp.err != nil {
+			return id, resp.err
+		}
+
+		if result == nil || len(resp.result) == 0 {
+			return id, nil
+		}
+
+		dec := json.NewDecoder(bytes.NewReader(resp.result))
+		dec.ZeroCopy()
+		if err := dec.Decode(result); err != nil {
+			return id, fmt.Errorf("unmarshaling result: %w", err)
+		}
+
+		return id, nil
+
+	case <-ctx.Done():
+		return id, ctx.Err()
+	}
+}
+
+// Notify implements Conn.
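+//
+// Unlike Call, nothing is added to the pending map: a notification carries
+// no id, so no response will ever be routed back for it.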
+func (c *conn) Notify(ctx context.Context, method string, params interface{}) (err error) {
+	notify, err := NewNotification(method, params)
+	if err != nil {
+		return fmt.Errorf("marshaling notify parameters: %w", err)
+	}
+
+	_, err = c.write(ctx, notify)
+
+	return err
+}
+
+func (c *conn) replier(req Message) Replier {
+	return func(ctx context.Context, result interface{}, err error) error {
+		call, ok := req.(*Call)
+		if !ok {
+			// request was a notify, no need to respond
+			return nil
+		}
+
+		response, err := NewResponse(call.id, result, err)
+		if err != nil {
+			return err
+		}
+
+		_, err = c.write(ctx, response)
+		if err != nil {
+			// TODO(iancottrell): if a stream write fails, we really need to shut down the whole stream
+			return err
+		}
+		return nil
+	}
+}
+
+func (c *conn) write(ctx context.Context, msg Message) (int64, error) {
+	c.writeMu.Lock()
+	n, err := c.stream.Write(ctx, msg)
+	c.writeMu.Unlock()
+	if err != nil {
+		return 0, fmt.Errorf("write to stream: %w", err)
+	}
+
+	return n, nil
+}
+
+// Go implements Conn.
+func (c *conn) Go(ctx context.Context, handler Handler) {
+	go c.run(ctx, handler)
+}
+
+func (c *conn) run(ctx context.Context, handler Handler) {
+	defer close(c.done)
+
+	for {
+		// get the next message
+		msg, _, err := c.stream.Read(ctx)
+		if err != nil {
+			// The stream failed, we cannot continue.
+			c.fail(err)
+			return
+		}
+
+		switch msg := msg.(type) {
+		case Request:
+			if err := handler(ctx, c.replier(msg), msg); err != nil {
+				c.fail(err)
+			}
+
+		case *Response:
+			// If method is not set, this should be a response, in which case we must
+			// have an id to send the response back to the caller.
+			c.pendingMu.Lock()
+			rchan, ok := c.pending[msg.id]
+			c.pendingMu.Unlock()
+			if ok {
+				rchan <- msg
+			}
+		}
+	}
+}
+
+// Close implements Conn.
+func (c *conn) Close() error {
+	return c.stream.Close()
+}
+
+// Done implements Conn.
+func (c *conn) Done() <-chan struct{} {
+	return c.done
+}
+
+// Err implements Conn.
+func (c *conn) Err() error {
+	if err := c.err.Load(); err != nil {
+		return err.(error)
+	}
+	return nil
+}
+
+// fail sets a failure condition on the stream and closes it.
+func (c *conn) fail(err error) {
+	c.err.Store(err)
+	c.stream.Close()
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/errors.go b/vendor/go.lsp.dev/jsonrpc2/errors.go
new file mode 100644
index 00000000000..9c15d42865c
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/errors.go
@@ -0,0 +1,70 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Error represents a JSON-RPC error.
+type Error struct {
+	// Code a number indicating the error type that occurred.
+	Code Code `json:"code"`
+
+	// Message a string providing a short description of the error.
+	Message string `json:"message"`
+
+	// Data a Primitive or Structured value that contains additional
+	// information about the error. Can be omitted.
+	Data *json.RawMessage `json:"data,omitempty"`
+}
+
+// compile time check whether the Error implements error interface.
+var _ error = (*Error)(nil)
+
+// Error implements error.Error.
+func (e *Error) Error() string {
+	if e == nil {
+		return ""
+	}
+	return e.Message
+}
+
+// Unwrap implements errors.Unwrap.
+//
+// Returns the error underlying the receiver, which may be nil.
+func (e *Error) Unwrap() error { return errors.New(e.Message) }
+
+// NewError builds an Error struct for the supplied code and message.
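+//
+// The returned *Error implements the error interface, so handlers can return
+// it directly or match it later with errors.As to recover the wire code.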
+func NewError(c Code, message string) *Error {
+	return &Error{
+		Code: c,
+		Message: message,
+	}
+}
+
+// Errorf builds an Error struct for the supplied code, format and args.
+func Errorf(c Code, format string, args ...interface{}) *Error {
+	return &Error{
+		Code: c,
+		Message: fmt.Sprintf(format, args...),
+	}
+}
+
+// constErr represents an error constant.
+type constErr string
+
+// compile time check whether the constErr implements error interface.
+var _ error = (*constErr)(nil)
+
+// Error implements error.Error.
+func (e constErr) Error() string { return string(e) }
+
+const (
+	// ErrIdleTimeout is returned when serving timed out waiting for new connections.
+	ErrIdleTimeout = constErr("timed out waiting for new connections")
+)
diff --git a/vendor/go.lsp.dev/jsonrpc2/handler.go b/vendor/go.lsp.dev/jsonrpc2/handler.go
new file mode 100644
index 00000000000..3c4e32251de
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/handler.go
@@ -0,0 +1,120 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"context"
+	"fmt"
+	"sync"
+)
+
+// Handler is invoked to handle incoming requests.
+//
+// The Replier sends a reply to the request and must be called exactly once.
+type Handler func(ctx context.Context, reply Replier, req Request) error
+
+// Replier is passed to handlers to allow them to reply to the request.
+//
+// If err is set then result will be ignored.
+type Replier func(ctx context.Context, result interface{}, err error) error
+
+// MethodNotFoundHandler is a Handler that replies to all call requests with the
+// standard method not found response.
+//
+// This should normally be the final handler in a chain.
+func MethodNotFoundHandler(ctx context.Context, reply Replier, req Request) error {
+	return reply(ctx, nil, fmt.Errorf("%q: %w", req.Method(), ErrMethodNotFound))
+}
+
+// ReplyHandler creates a Handler that panics if the wrapped handler does
+// not call Reply for every request that it is passed.
+func ReplyHandler(handler Handler) (h Handler) {
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		called := false
+		err := handler(ctx, func(ctx context.Context, result interface{}, err error) error {
+			if called {
+				panic(fmt.Errorf("request %q replied to more than once", req.Method()))
+			}
+			called = true
+
+			return reply(ctx, result, err)
+		}, req)
+		if !called {
+			panic(fmt.Errorf("request %q was never replied to", req.Method()))
+		}
+		return err
+	})
+
+	return h
+}
+
+// CancelHandler returns a handler that supports cancellation, and a function
+// that can be used to cancel in-progress requests.
+func CancelHandler(handler Handler) (h Handler, canceller func(id ID)) {
+	var mu sync.Mutex
+	handling := make(map[ID]context.CancelFunc)
+
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		if call, ok := req.(*Call); ok {
+			cancelCtx, cancel := context.WithCancel(ctx)
+			ctx = cancelCtx
+
+			mu.Lock()
+			handling[call.ID()] = cancel
+			mu.Unlock()
+
+			innerReply := reply
+			reply = func(ctx context.Context, result interface{}, err error) error {
+				mu.Lock()
+				delete(handling, call.ID())
+				mu.Unlock()
+				return innerReply(ctx, result, err)
+			}
+		}
+		return handler(ctx, reply, req)
+	})
+
+	canceller = func(id ID) {
+		mu.Lock()
+		cancel, found := handling[id]
+		mu.Unlock()
+		if found {
+			cancel()
+		}
+	}
+
+	return h, canceller
+}
+
+// AsyncHandler returns a handler that processes each request in its own
+// goroutine.
+//
+// The handler returns immediately, without the request being processed.
+// Each request then waits for the previous request to finish before it starts.
+//
+// This allows the stream to unblock at the cost of unbounded goroutines
+// all stalled on the previous one.
+func AsyncHandler(handler Handler) (h Handler) {
+	nextRequest := make(chan struct{})
+	close(nextRequest)
+
+	h = Handler(func(ctx context.Context, reply Replier, req Request) error {
+		waitForPrevious := nextRequest
+		nextRequest = make(chan struct{})
+		unlockNext := nextRequest
+		innerReply := reply
+		reply = func(ctx context.Context, result interface{}, err error) error {
+			close(unlockNext)
+			return innerReply(ctx, result, err)
+		}
+
+		go func() {
+			<-waitForPrevious
+			_ = handler(ctx, reply, req)
+		}()
+		return nil
+	})
+
+	return h
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go b/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go
new file mode 100644
index 00000000000..dafd0667251
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/jsonrpc2.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package jsonrpc2 is an implementation of the JSON-RPC 2 specification for Go.
+//
+// https://www.jsonrpc.org/specification
+package jsonrpc2 // import "go.lsp.dev/jsonrpc2"
diff --git a/vendor/go.lsp.dev/jsonrpc2/message.go b/vendor/go.lsp.dev/jsonrpc2/message.go
new file mode 100644
index 00000000000..0f3fd54c600
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/message.go
@@ -0,0 +1,358 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Message is the interface to all JSON-RPC message types.
+//
+// They share no common functionality, but are a closed set of concrete types
+// that are allowed to implement this interface.
+//
+// The message types are *Call, *Response and *Notification.
+type Message interface {
+	// jsonrpc2Message is used to make the set of message implementations a
+	// closed set.
+	jsonrpc2Message()
+}
+
+// Request is the shared interface to jsonrpc2 messages that request
+// a method be invoked.
+//
+// The request types are a closed set of *Call and *Notification.
+type Request interface {
+	Message
+
+	// Method is a string containing the method name to invoke.
+	Method() string
+	// Params is either a struct or an array with the parameters of the method.
+	Params() json.RawMessage
+
+	// jsonrpc2Request is used to make the set of request implementations closed.
+	jsonrpc2Request()
+}
+
+// Call is a request that expects a response.
+//
+// The response will have a matching ID.
+type Call struct {
+	// Method is a string containing the method name to invoke.
+	method string
+	// Params is either a struct or an array with the parameters of the method.
+	params json.RawMessage
+	// id of this request, used to tie the Response back to the request.
+	id ID
+}
+
+// make sure a Call implements the Request, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Request = (*Call)(nil)
+	_ json.Marshaler = (*Call)(nil)
+	_ json.Unmarshaler = (*Call)(nil)
+)
+
+// NewCall constructs a new Call message for the supplied ID, method and
+// parameters.
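+//
+// The params are marshaled eagerly here, so a marshaling failure surfaces
+// from NewCall itself, before anything is written to the stream.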
+// NewCall constructs a new Call message for the supplied ID, method and
+// parameters.
+func NewCall(id ID, method string, params interface{}) (*Call, error) {
+	p, merr := marshalInterface(params)
+	req := &Call{
+		id:     id,
+		method: method,
+		params: p,
+	}
+	return req, merr
+}
+
+// ID returns the current call id.
+func (c *Call) ID() ID { return c.id }
+
+// Method implements Request.
+func (c *Call) Method() string { return c.method }
+
+// Params implements Request.
+func (c *Call) Params() json.RawMessage { return c.params }
+
+// jsonrpc2Message implements Message.
+func (Call) jsonrpc2Message() {}
+
+// jsonrpc2Request implements Request.
+func (Call) jsonrpc2Request() {}
+
+// MarshalJSON implements json.Marshaler.
+func (c Call) MarshalJSON() ([]byte, error) {
+	req := wireRequest{
+		Method: c.method,
+		Params: &c.params,
+		ID:     &c.id,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return data, fmt.Errorf("marshaling call: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (c *Call) UnmarshalJSON(data []byte) error {
+	var req wireRequest
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&req); err != nil {
+		return fmt.Errorf("unmarshaling call: %w", err)
+	}
+
+	c.method = req.Method
+	if req.Params != nil {
+		c.params = *req.Params
+	}
+	if req.ID != nil {
+		c.id = *req.ID
+	}
+
+	return nil
+}
+
+// Response is a reply to a Call.
+//
+// It will have the same ID as the call it is a response to.
+type Response struct {
+	// result is the content of the response.
+	result json.RawMessage
+	// err is set only if the call failed.
+	err error
+	// ID of the request this is a response to.
+	id ID
+}
+
+// make sure a Response implements the Message, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Message          = (*Response)(nil)
+	_ json.Marshaler   = (*Response)(nil)
+	_ json.Unmarshaler = (*Response)(nil)
+)
+
+// NewResponse constructs a new Response message that is a reply to the
+// supplied request. If err is set result may be ignored.
+func NewResponse(id ID, result interface{}, err error) (*Response, error) {
+	r, merr := marshalInterface(result)
+	resp := &Response{
+		id:     id,
+		result: r,
+		err:    err,
+	}
+	return resp, merr
+}
+
+// ID returns the current response id.
+func (r *Response) ID() ID { return r.id }
+
+// Result returns the Response result.
+func (r *Response) Result() json.RawMessage { return r.result }
+
+// Err returns the Response error.
+func (r *Response) Err() error { return r.err }
+
+// jsonrpc2Message implements Message.
+func (r *Response) jsonrpc2Message() {}
+
+// MarshalJSON implements json.Marshaler.
+func (r Response) MarshalJSON() ([]byte, error) {
+	resp := &wireResponse{
+		Error: toError(r.err),
+		ID:    &r.id,
+	}
+	if resp.Error == nil {
+		resp.Result = &r.result
+	}
+
+	data, err := json.Marshal(resp)
+	if err != nil {
+		return data, fmt.Errorf("marshaling response: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (r *Response) UnmarshalJSON(data []byte) error {
+	var resp wireResponse
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&resp); err != nil {
+		return fmt.Errorf("unmarshaling jsonrpc response: %w", err)
+	}
+
+	if resp.Result != nil {
+		r.result = *resp.Result
+	}
+	if resp.Error != nil {
+		r.err = resp.Error
+	}
+	if resp.ID != nil {
+		r.id = *resp.ID
+	}
+
+	return nil
+}
+
+func toError(err error) *Error {
+	if err == nil {
+		// no error, the response is complete
+		return nil
+	}
+
+	var wrapped *Error
+	if errors.As(err, &wrapped) {
+		// already a wire error, just use it
+		return wrapped
+	}
+
+	result := &Error{Message: err.Error()}
+	if errors.As(err, &wrapped) {
+		// if we wrapped a wire error, keep the code from the wrapped error
+		// but the message from the outer error
+		result.Code = wrapped.Code
+	}
+
+	return result
+}
+
+// Notification is a request for which a response cannot occur, and as such
+// it has no ID.
+type Notification struct {
+	// Method is a string containing the method name to invoke.
+	method string
+
+	params json.RawMessage
+}
+
+// make sure a Notification implements the Request, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ Request          = (*Notification)(nil)
+	_ json.Marshaler   = (*Notification)(nil)
+	_ json.Unmarshaler = (*Notification)(nil)
+)
+
+// NewNotification constructs a new Notification message for the supplied
+// method and parameters.
+func NewNotification(method string, params interface{}) (*Notification, error) {
+	p, merr := marshalInterface(params)
+	notify := &Notification{
+		method: method,
+		params: p,
+	}
+	return notify, merr
+}
+
+// Method implements Request.
+func (n *Notification) Method() string { return n.method }
+
+// Params implements Request.
+func (n *Notification) Params() json.RawMessage { return n.params }
+
+// jsonrpc2Message implements Message.
+func (Notification) jsonrpc2Message() {}
+
+// jsonrpc2Request implements Request.
+func (Notification) jsonrpc2Request() {}
+
+// MarshalJSON implements json.Marshaler.
+func (n Notification) MarshalJSON() ([]byte, error) {
+	req := wireRequest{
+		Method: n.method,
+		Params: &n.params,
+	}
+	data, err := json.Marshal(req)
+	if err != nil {
+		return data, fmt.Errorf("marshaling notification: %w", err)
+	}
+
+	return data, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (n *Notification) UnmarshalJSON(data []byte) error {
+	var req wireRequest
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&req); err != nil {
+		return fmt.Errorf("unmarshaling notification: %w", err)
+	}
+
+	n.method = req.Method
+	if req.Params != nil {
+		n.params = *req.Params
+	}
+
+	return nil
+}
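A sketch contrasting the two request kinds; method names and params are illustrative:

```go
package main

import "go.lsp.dev/jsonrpc2"

func main() {
	// A call carries an ID and expects a Response with the same ID.
	call, _ := jsonrpc2.NewCall(jsonrpc2.NewNumberID(1), "initialize", nil)
	_ = call

	// A notification has no ID, so no response can ever be produced for it.
	note, _ := jsonrpc2.NewNotification("exit", nil)
	_ = note
}
```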
+// DecodeMessage decodes data to Message.
+func DecodeMessage(data []byte) (Message, error) {
+	var msg combined
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.ZeroCopy()
+	if err := dec.Decode(&msg); err != nil {
+		return nil, fmt.Errorf("unmarshaling jsonrpc message: %w", err)
+	}
+
+	if msg.Method == "" {
+		// no method, should be a response
+		if msg.ID == nil {
+			return nil, ErrInvalidRequest
+		}
+
+		resp := &Response{
+			id: *msg.ID,
+		}
+		if msg.Error != nil {
+			resp.err = msg.Error
+		}
+		if msg.Result != nil {
+			resp.result = *msg.Result
+		}
+
+		return resp, nil
+	}
+
+	// has a method, must be a request
+	if msg.ID == nil {
+		// request with no ID is a notify
+		notify := &Notification{
+			method: msg.Method,
+		}
+		if msg.Params != nil {
+			notify.params = *msg.Params
+		}
+
+		return notify, nil
+	}
+
+	// request with an ID, must be a call
+	call := &Call{
+		method: msg.Method,
+		id:     *msg.ID,
+	}
+	if msg.Params != nil {
+		call.params = *msg.Params
+	}
+
+	return call, nil
+}
+
+// marshalInterface marshals obj into a json.RawMessage.
+func marshalInterface(obj interface{}) (json.RawMessage, error) {
+	data, err := json.Marshal(obj)
+	if err != nil {
+		return json.RawMessage{}, fmt.Errorf("failed to marshal json: %w", err)
+	}
+	return json.RawMessage(data), nil
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/serve.go b/vendor/go.lsp.dev/jsonrpc2/serve.go
new file mode 100644
index 00000000000..3fc28dd6574
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/serve.go
@@ -0,0 +1,129 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"os"
+	"time"
+)
+
+// NOTE: This file provides an experimental API for serving multiple remote
+// jsonrpc2 clients over the network. For now, it is intentionally similar to
+// net/http, but that may change in the future as we figure out the correct
+// semantics.
+
+// StreamServer is used to serve incoming jsonrpc2 clients communicating over
+// a newly created connection.
+type StreamServer interface {
+	ServeStream(context.Context, Conn) error
+}
+
+// ServerFunc is an adapter that implements the StreamServer interface
+// using an ordinary function.
+type ServerFunc func(context.Context, Conn) error
+
+// ServeStream implements StreamServer.
+//
+// ServeStream calls f(ctx, c).
+func (f ServerFunc) ServeStream(ctx context.Context, c Conn) error {
+	return f(ctx, c)
+}
+
+// HandlerServer returns a StreamServer that handles incoming streams using the
+// provided handler.
+func HandlerServer(h Handler) StreamServer {
+	return ServerFunc(func(ctx context.Context, conn Conn) error {
+		conn.Go(ctx, h)
+		<-conn.Done()
+		return conn.Err()
+	})
+}
+
+// ListenAndServe starts a jsonrpc2 server on the given address.
+//
+// If idleTimeout is non-zero, ListenAndServe exits after there are no clients for
+// this duration, otherwise it exits only on error.
+func ListenAndServe(ctx context.Context, network, addr string, server StreamServer, idleTimeout time.Duration) error {
+	ln, err := net.Listen(network, addr)
+	if err != nil {
+		return fmt.Errorf("failed to listen %s:%s: %w", network, addr, err)
+	}
+	defer ln.Close()
+
+	if network == "unix" {
+		defer os.Remove(addr)
+	}
+
+	return Serve(ctx, ln, server, idleTimeout)
+}
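A sketch of standing up a TCP server from these pieces; the address and idle timeout are illustrative:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	// Reply "method not found" to every call; a real server would chain
	// its own Handler in front (see handler.go).
	server := jsonrpc2.HandlerServer(jsonrpc2.ReplyHandler(jsonrpc2.MethodNotFoundHandler))

	// Exit once no client has connected for a minute.
	ctx := context.Background()
	if err := jsonrpc2.ListenAndServe(ctx, "tcp", "127.0.0.1:4389", server, time.Minute); err != nil {
		log.Fatal(err)
	}
}
```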
+// Serve accepts incoming connections from the network, and handles them using
+// the provided server. If idleTimeout is non-zero, Serve exits after
+// there are no clients for this duration, otherwise it exits only on error.
+func Serve(ctx context.Context, ln net.Listener, server StreamServer, idleTimeout time.Duration) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Max duration: ~290 years; surely that's long enough.
+	const forever = 1<<63 - 1
+	if idleTimeout <= 0 {
+		idleTimeout = forever
+	}
+	connTimer := time.NewTimer(idleTimeout)
+
+	newConns := make(chan net.Conn)
+	doneListening := make(chan error)
+	closedConns := make(chan error)
+
+	go func() {
+		for {
+			nc, err := ln.Accept()
+			if err != nil {
+				select {
+				case doneListening <- fmt.Errorf("accept: %w", err):
+				case <-ctx.Done():
+				}
+				return
+			}
+
+			newConns <- nc
+		}
+	}()
+
+	activeConns := 0
+	for {
+		select {
+		case netConn := <-newConns:
+			activeConns++
+			connTimer.Stop()
+			stream := NewStream(netConn)
+			go func() {
+				conn := NewConn(stream)
+				closedConns <- server.ServeStream(ctx, conn)
+				stream.Close()
+			}()
+
+		case err := <-doneListening:
+			return err
+
+		case <-closedConns:
+			activeConns--
+			if activeConns == 0 {
+				connTimer.Reset(idleTimeout)
+			}
+
+		case <-connTimer.C:
+			return ErrIdleTimeout
+
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/stream.go b/vendor/go.lsp.dev/jsonrpc2/stream.go
new file mode 100644
index 00000000000..b05ab6bbfc4
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/stream.go
@@ -0,0 +1,226 @@
+// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"bufio"
+	"context"
+	stdjson "encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/segmentio/encoding/json"
+)
+
+const (
+	// HdrContentLength is the HTTP header name of the length of the content part in bytes. This header is required.
+	// This entity header indicates the size of the entity-body, in bytes, sent to the recipient.
+	//
+	// RFC 7230, section 3.3.2: Content-Length:
+	// https://tools.ietf.org/html/rfc7230#section-3.3.2
+	HdrContentLength = "Content-Length"
+
+	// HdrContentType is the mime type of the content part. Defaults to "application/vscode-jsonrpc; charset=utf-8".
+	// This entity header is used to indicate the media type of the resource.
+	//
+	// RFC 7231, section 3.1.1.5: Content-Type:
+	// https://tools.ietf.org/html/rfc7231#section-3.1.1.5
+	HdrContentType = "Content-Type"
+
+	// HdrContentSeparator is the header and content part separator.
+	HdrContentSeparator = "\r\n\r\n"
+)
+
+// Framer wraps a network connection up into a Stream.
+//
+// It is responsible for the framing and encoding of messages into wire form.
+// NewRawStream and NewStream are implementations of a Framer.
+type Framer func(conn io.ReadWriteCloser) Stream
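For reference, a framed message as produced by the header-based Stream below looks like a header line, a blank line, then the JSON body. A sketch with an illustrative payload:

```go
package main

import (
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	payload := []byte(`{"jsonrpc":"2.0","id":1,"method":"shutdown"}`)
	frame := fmt.Sprintf("%s: %d%s%s",
		jsonrpc2.HdrContentLength, len(payload), jsonrpc2.HdrContentSeparator, payload)
	fmt.Printf("%q\n", frame) // "Content-Length: 44\r\n\r\n{\"jsonrpc\":\"2.0\",...}"
}
```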
+// Stream abstracts the transport mechanics from the JSON RPC protocol.
+//
+// A Conn reads and writes messages using the stream it was provided on
+// construction, and assumes that each call to Read or Write fully transfers
+// a single message, or returns an error.
+//
+// A stream is not safe for concurrent use; it is expected it will be used by
+// a single Conn in a safe manner.
+type Stream interface {
+	// Read gets the next message from the stream.
+	Read(context.Context) (Message, int64, error)
+
+	// Write sends a message to the stream.
+	Write(context.Context, Message) (int64, error)
+
+	// Close closes the connection.
+	// Any blocked Read or Write operations will be unblocked and return errors.
+	Close() error
+}
+
+type rawStream struct {
+	conn io.ReadWriteCloser
+	in   *stdjson.Decoder
+}
+
+// NewRawStream returns a Stream built on top of an io.ReadWriteCloser.
+//
+// The messages are sent with no wrapping, and rely on json decode consistency
+// to determine message boundaries.
+func NewRawStream(conn io.ReadWriteCloser) Stream {
+	return &rawStream{
+		conn: conn,
+		in:   stdjson.NewDecoder(conn), // TODO(zchee): why test fail using segmentio json.Decoder?
+	}
+}
+
+// Read implements Stream.Read.
+func (s *rawStream) Read(ctx context.Context) (Message, int64, error) {
+	select {
+	case <-ctx.Done():
+		return nil, 0, ctx.Err()
+	default:
+	}
+
+	var raw stdjson.RawMessage
+	if err := s.in.Decode(&raw); err != nil {
+		return nil, 0, fmt.Errorf("decoding raw message: %w", err)
+	}
+
+	msg, err := DecodeMessage(raw)
+	return msg, int64(len(raw)), err
+}
+
+// Write implements Stream.Write.
+func (s *rawStream) Write(ctx context.Context, msg Message) (int64, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ctx.Err()
+	default:
+	}
+
+	data, err := json.Marshal(msg)
+	if err != nil {
+		return 0, fmt.Errorf("marshaling message: %w", err)
+	}
+
+	n, err := s.conn.Write(data)
+	if err != nil {
+		return 0, fmt.Errorf("write to stream: %w", err)
+	}
+
+	return int64(n), nil
+}
+
+// Close implements Stream.Close.
+func (s *rawStream) Close() error {
+	return s.conn.Close()
+}
+
+type stream struct {
+	conn io.ReadWriteCloser
+	in   *bufio.Reader
+}
+
+// NewStream returns a Stream built on top of an io.ReadWriteCloser.
+//
+// The messages are sent with HTTP content length and MIME type headers.
+// This is the format used by LSP and others.
+func NewStream(conn io.ReadWriteCloser) Stream {
+	return &stream{
+		conn: conn,
+		in:   bufio.NewReader(conn),
+	}
+}
+
+// Read implements Stream.Read.
+func (s *stream) Read(ctx context.Context) (Message, int64, error) {
+	select {
+	case <-ctx.Done():
+		return nil, 0, ctx.Err()
+	default:
+	}
+
+	var total int64
+	var length int64
+	// read the header, stop on the first empty line
+	for {
+		line, err := s.in.ReadString('\n')
+		total += int64(len(line))
+		if err != nil {
+			return nil, total, fmt.Errorf("failed reading header line: %w", err)
+		}
+
+		line = strings.TrimSpace(line)
+		// check we have a header line
+		if line == "" {
+			break
+		}
+
+		colon := strings.IndexRune(line, ':')
+		if colon < 0 {
+			return nil, total, fmt.Errorf("invalid header line %q", line)
+		}
+
+		name, value := line[:colon], strings.TrimSpace(line[colon+1:])
+		switch name {
+		case HdrContentLength:
+			if length, err = strconv.ParseInt(value, 10, 32); err != nil {
+				return nil, total, fmt.Errorf("failed parsing %s: %v: %w", HdrContentLength, value, err)
+			}
+			if length <= 0 {
+				return nil, total, fmt.Errorf("invalid %s: %v", HdrContentLength, length)
+			}
+		default:
+			// ignoring unknown headers
+		}
+	}
+
+	if length == 0 {
+		return nil, total, fmt.Errorf("missing %s header", HdrContentLength)
+	}
+
+	data := make([]byte, length)
+	if _, err := io.ReadFull(s.in, data); err != nil {
+		return nil, total, fmt.Errorf("read full of data: %w", err)
+	}
+
+	total += length
+	msg, err := DecodeMessage(data)
+	return msg, total, err
+}
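A sketch round-tripping one message over an in-memory pipe with the framed stream; the method name is illustrative:

```go
package main

import (
	"context"
	"fmt"
	"net"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	c1, c2 := net.Pipe()
	writer, reader := jsonrpc2.NewStream(c1), jsonrpc2.NewStream(c2)

	go func() {
		call, _ := jsonrpc2.NewCall(jsonrpc2.NewNumberID(1), "initialize", nil)
		_, _ = writer.Write(context.Background(), call) // framed with Content-Length
	}()

	msg, n, err := reader.Read(context.Background())
	fmt.Printf("%T %d %v\n", msg, n, err) // *jsonrpc2.Call, total bytes read, <nil>
}
```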
+func (s *stream) Write(ctx context.Context, msg Message) (int64, error) {
+	select {
+	case <-ctx.Done():
+		return 0, ctx.Err()
+	default:
+	}
+
+	data, err := json.Marshal(msg)
+	if err != nil {
+		return 0, fmt.Errorf("marshaling message: %w", err)
+	}
+
+	n, err := fmt.Fprintf(s.conn, "%s: %v%s", HdrContentLength, len(data), HdrContentSeparator)
+	total := int64(n)
+	if err != nil {
+		return 0, fmt.Errorf("write data to conn: %w", err)
+	}
+
+	n, err = s.conn.Write(data)
+	total += int64(n)
+	if err != nil {
+		return 0, fmt.Errorf("write data to conn: %w", err)
+	}
+
+	return total, nil
+}
+
+// Close implements Stream.Close.
+func (s *stream) Close() error {
+	return s.conn.Close()
+}
diff --git a/vendor/go.lsp.dev/jsonrpc2/wire.go b/vendor/go.lsp.dev/jsonrpc2/wire.go
new file mode 100644
index 00000000000..1fb3f7f2c14
--- /dev/null
+++ b/vendor/go.lsp.dev/jsonrpc2/wire.go
@@ -0,0 +1,140 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package jsonrpc2
+
+import (
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// Version represents a JSON-RPC version.
+const Version = "2.0"
+
+// version is a special 0 sized struct that encodes as the jsonrpc version tag.
+//
+// It will fail during decode if it is not the correct version tag in the stream.
+type version struct{}
+
+// compile time check whether the version implements the json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ json.Marshaler   = (*version)(nil)
+	_ json.Unmarshaler = (*version)(nil)
+)
+
+// MarshalJSON implements json.Marshaler.
+func (version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(Version)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (version) UnmarshalJSON(data []byte) error {
+	version := ""
+	if err := json.Unmarshal(data, &version); err != nil {
+		return fmt.Errorf("failed to Unmarshal: %w", err)
+	}
+	if version != Version {
+		return fmt.Errorf("invalid RPC version %v", version)
+	}
+	return nil
+}
+
+// ID is a Request identifier.
+//
+// Only one of either the Name or Number members will be set, using the
+// number form if the Name is the empty string.
+type ID struct {
+	name   string
+	number int32
+}
+
+// compile time check whether the ID implements the fmt.Formatter, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ fmt.Formatter    = (*ID)(nil)
+	_ json.Marshaler   = (*ID)(nil)
+	_ json.Unmarshaler = (*ID)(nil)
+)
+
+// NewNumberID returns a new number request ID.
+func NewNumberID(v int32) ID { return ID{number: v} }
+
+// NewStringID returns a new string request ID.
+func NewStringID(v string) ID { return ID{name: v} }
+
+// Format writes the ID to the formatter.
+//
+// If the rune is q the representation is non-ambiguous,
+// string forms are quoted, number forms are preceded by a #.
+func (id ID) Format(f fmt.State, r rune) {
+	numF, strF := `%d`, `%s`
+	if r == 'q' {
+		numF, strF = `#%d`, `%q`
+	}
+
+	switch {
+	case id.name != "":
+		fmt.Fprintf(f, strF, id.name)
+	default:
+		fmt.Fprintf(f, numF, id.number)
+	}
+}
+
+// MarshalJSON implements json.Marshaler.
+func (id *ID) MarshalJSON() ([]byte, error) {
+	if id.name != "" {
+		return json.Marshal(id.name)
+	}
+	return json.Marshal(id.number)
+}
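A sketch of the two ID forms and the %q disambiguation described above:

```go
package main

import (
	"fmt"

	"go.lsp.dev/jsonrpc2"
)

func main() {
	fmt.Printf("%q\n", jsonrpc2.NewStringID("abc")) // "abc" (string IDs are quoted)
	fmt.Printf("%q\n", jsonrpc2.NewNumberID(7))     // #7   (number IDs get a # prefix)
}
```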
+func (id *ID) UnmarshalJSON(data []byte) error {
+	*id = ID{}
+	if err := json.Unmarshal(data, &id.number); err == nil {
+		return nil
+	}
+	return json.Unmarshal(data, &id.name)
+}
+
+// wireRequest is sent to a server to represent a Call or Notify operation.
+type wireRequest struct {
+	// VersionTag is always encoded as the string "2.0"
+	VersionTag version `json:"jsonrpc"`
+	// Method is a string containing the method name to invoke.
+	Method string `json:"method"`
+	// Params is either a struct or an array with the parameters of the method.
+	Params *json.RawMessage `json:"params,omitempty"`
+	// The id of this request, used to tie the Response back to the request.
+	// Will be either a string or a number. If not set, the Request is a notify,
+	// and no response is possible.
+	ID *ID `json:"id,omitempty"`
+}
+
+// wireResponse is a reply to a Request.
+//
+// It will always have the ID field set to tie it back to a request, and will
+// have either the Result or Error fields set depending on whether it is a
+// success or failure wireResponse.
+type wireResponse struct {
+	// VersionTag is always encoded as the string "2.0"
+	VersionTag version `json:"jsonrpc"`
+	// Result is the response value, and is required on success.
+	Result *json.RawMessage `json:"result,omitempty"`
+	// Error is a structured error response if the call fails.
+	Error *Error `json:"error,omitempty"`
+	// ID must be set and is the identifier of the Request this is a response to.
+	ID *ID `json:"id,omitempty"`
+}
+
+// combined has all the fields of both Request and Response.
+//
+// We can decode this and then work out which it is.
+type combined struct {
+	VersionTag version          `json:"jsonrpc"`
+	ID         *ID              `json:"id,omitempty"`
+	Method     string           `json:"method"`
+	Params     *json.RawMessage `json:"params,omitempty"`
+	Result     *json.RawMessage `json:"result,omitempty"`
+	Error      *Error           `json:"error,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/pkg/LICENSE b/vendor/go.lsp.dev/pkg/LICENSE
new file mode 100644
index 00000000000..4551f8c8a69
--- /dev/null
+++ b/vendor/go.lsp.dev/pkg/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2020, The Go Language Server Authors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/go.lsp.dev/pkg/xcontext/xcontext.go b/vendor/go.lsp.dev/pkg/xcontext/xcontext.go
new file mode 100644
index 00000000000..df9017e0ca2
--- /dev/null
+++ b/vendor/go.lsp.dev/pkg/xcontext/xcontext.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package xcontext offers the extra functionality we need from contexts
+// that is not available from the standard context package.
+package xcontext
+
+import (
+	"context"
+	"time"
+)
+
+// Detach returns a context that keeps all the values of its parent context
+// but detaches from the cancellation and error handling.
+func Detach(ctx context.Context) context.Context { return detachedContext{ctx} }
+
+type detachedContext struct{ parent context.Context }
+
+func (v detachedContext) Deadline() (time.Time, bool)       { return time.Time{}, false }
+func (v detachedContext) Done() <-chan struct{}             { return nil }
+func (v detachedContext) Err() error                        { return nil }
+func (v detachedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }
diff --git a/vendor/go.lsp.dev/protocol/.codecov.yml b/vendor/go.lsp.dev/protocol/.codecov.yml
new file mode 100644
index 00000000000..dd726bf10fc
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/.codecov.yml
@@ -0,0 +1,45 @@
+codecov:
+  allow_coverage_offsets: true
+
+parsers:
+  go:
+    partials_as_hits: true
+
+coverage:
+  precision: 1
+  round: down
+  range: "70...100"
+
+  status:
+    default_rules:
+      flag_coverage_not_uploaded_behavior: include
+
+    project:
+      default:
+        target: auto
+        threshold: 1%
+        if_not_found: success
+        if_ci_failed: error
+
+    patch:
+      default:
+        only_pulls: true
+        target: 50%
+        threshold: 10%
+
+    changes:
+      default:
+        target: auto
+        threshold: 10%
+        if_not_found: success
+        if_ci_failed: error
+        branches:
+          - main
+
+comment:
+  behavior: default
+  require_changes: true
+  show_carryforward_flags: true
+
+github_checks:
+  annotations: true
diff --git a/vendor/go.lsp.dev/protocol/.gitattributes b/vendor/go.lsp.dev/protocol/.gitattributes
new file mode 100644
index 00000000000..a7b063e2172
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/.gitattributes
@@ -0,0 +1,11 @@
+# go.lsp.dev/protocol project gitattributes file
+# https://github.com/github/linguist#using-gitattributes
+# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml
+
+# To prevent CRLF breakages on Windows for fragile files, like testdata.
+* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/protocol/.gitignore b/vendor/go.lsp.dev/protocol/.gitignore new file mode 100644 index 00000000000..54e59b7493a --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.gitignore @@ -0,0 +1,52 @@ +# go.lsp.dev/protocol project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* + +# tools +tools/bin/ diff --git a/vendor/go.lsp.dev/protocol/.golangci.yml b/vendor/go.lsp.dev/protocol/.golangci.yml new file mode 100644 index 00000000000..8667ed4cdc0 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/.golangci.yml @@ -0,0 +1,242 @@ +# https://golangci-lint.run/usage/configuration/ +# https://github.com/golangci/golangci-lint/blob/master/pkg/config/linters_settings.go +--- +run: + timeout: 1m + issues-exit-code: 1 + tests: true + skip-dirs: [] + skip-dirs-use-default: true + skip-files: [] + allow-parallel-runners: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true + uniq-by-line: true + sort-results: true + +linters-settings: + depguard: + list-type: blacklist + include-go-root: false + dupl: + threshold: 150 + errcheck: + check-type-assertions: true + check-blank: true + # exclude: .errcheckignore + errorlint: + errorf: true + asserts: true + comparison: true + funlen: + lines: 100 + statements: 60 + gocognit: + min-complexity: 30 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + disabled-checks: + - commentedOutCode + - redundantSprint + - whyNoLint + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 30 + godot: + scope: declarations + capital: false + gofmt: + simplify: true + gofumpt: + extra-rules: true + goheader: + values: + const: + AUTHOR: Go Language Server + regexp: + YEAR: '20\d\d' + template: |- + SPDX-FileCopyrightText: {{ YEAR }} The {{ AUTHOR }} Authors + SPDX-License-Identifier: BSD-3-Clause + goimports: + local-prefixes: go.lsp.dev/protocol + gosimple: + go: 1.16 + govet: + enable-all: true + check-shadowing: true + disable: + - fieldalignment + importas: + alias: [] + no-unaliased: true + lll: + line-length: 120 + tab-width: 1 + misspell: + locale: US + ignore-words: + - cancelled + - cancelling + nakedret: + max-func-lines: 30 + nestif: + min-complexity: 4 + prealloc: + simple: true + range-loops: true + for-loops: true + staticcheck: + go: 1.16 + testpackage: + skip-regexp: '.*(export)_test\.go' + unparam: + check-exported: true 
+ algo: cha + unused: + go: 1.16 + whitespace: + multi-if: true + multi-func: true + +linters: + fast: false + disabled: + - exhaustivestruct # Checks if all struct's fields are initialized + - forbidigo # Forbids identifiers + - forcetypeassert # finds forced type assertions + - gci # Gci control golang package import order and make it always deterministic. + - gochecknoglobals # check that no global variables exist + - gochecknoinits # Checks that no init functions are present in Go code + - goconst # Finds repeated strings that could be replaced by a constant + - godox # Tool for detection of FIXME, TODO and other comment keywords + - goerr113 # Golang linter to check the errors handling expressions + - golint # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes + - gomnd # An analyzer to detect magic numbers. + - gomoddirectives # Manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. + - gomodguard # Allow and block list linter for direct Go module dependencies. + - interfacer # Linter that suggests narrower interface types + - lll # Reports long lines + - maligned # Tool to detect Go structs that would take less memory if their fields were sorted + - promlinter # Check Prometheus metrics naming via promlint + - scopelint # Scopelint checks for unpinned variables in go programs + - sqlclosecheck # Checks that sql.Rows and sql.Stmt are closed. + - testpackage # TODO(zchee): enable: # linter that makes you use a separate _test package + - tparallel # tparallel detects inappropriate usage of t.Parallel() method in your Go test codes + - wrapcheck # TODO(zchee): enable: # Checks that errors returned from external packages are wrapped + - wsl # Whitespace Linter + enable: + - asciicheck # Simple linter to check that your code does not contain non-ASCII identifiers + - bodyclose # checks whether HTTP response body is closed successfully + - cyclop # checks function and package cyclomatic complexity + - deadcode # Finds unused code + - depguard # Go linter that checks if package imports are in a list of acceptable packages + - dogsled # Checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + - dupl # Tool for code clone detection + - durationcheck # check for two durations multiplied together + - errcheck # Errcheck is a program for checking for unchecked errors in go programs. These unchecked errors can be critical bugs in some cases + - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. + - exhaustive # check exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + - funlen # Tool for detection of long functions + - gocognit # Computes and checks the cognitive complexity of functions + - gocritic # Provides many diagnostics that check for bugs, performance and style issues. + - gocyclo # Computes and checks the cyclomatic complexity of functions + - godot # Check if comments end in a period + - gofmt # Gofmt checks whether code was gofmt-ed. By default this tool runs with -s option to check for code simplification + - gofumpt # Gofumpt checks whether code was gofumpt-ed. + - goheader # Checks is file header matches to pattern + - goimports # Goimports does everything that gofmt does. 
Additionally it checks unused imports + - goprintffuncname # Checks that printf-like functions are named with `f` at the end + - gosec # Inspects source code for security problems + - gosimple # Linter for Go source code that specializes in simplifying a code + - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ifshort # Checks that your code uses short syntax for if-statements whenever possible + - importas # Enforces consistent import aliases + - ineffassign # Detects when assignments to existing variables are not used + - makezero # Finds slice declarations with non-zero initial length + - misspell # Finds commonly misspelled English words in comments + - nakedret # Finds naked returns in functions greater than a specified function length + - nestif # Reports deeply nested if statements + - nilerr # Finds the code that returns nil even if it checks that the error is not nil. + - nlreturn # nlreturn checks for a new line before return and branch statements to increase code clarity + - noctx # noctx finds sending http request without context.Context + - nolintlint # Reports ill-formed or insufficient nolint directives + - paralleltest # paralleltest detects missing usage of t.Parallel() method in your Go test + - prealloc # Finds slice declarations that could potentially be preallocated + - predeclared # find code that shadows one of Go's predeclared identifiers + - revive # Fast, configurable, extensible, flexible, and beautiful linter for Go. Drop-in replacement of golint. + - rowserrcheck # checks whether Err of rows is checked successfully + - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - structcheck # Finds unused struct fields + - stylecheck # Stylecheck is a replacement for golint + - tagliatelle # Checks the struct tags. + - thelper # thelper detects golang test helpers without t.Helper() call and checks the consistency of test helpers + - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code + - unconvert # Remove unnecessary type conversions + - unparam # Reports unused function parameters + - unused # Checks Go code for unused constants, variables, functions and types + - varcheck # Finds unused global variables and constants + - wastedassign # wastedassign finds wasted assignment statements. 
+ - whitespace # Tool for detection of leading and trailing whitespace + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-use-default: true + exclude-rules: + - path: _test\.go + linters: + - cyclop + - dupl + - errcheck + - funlen + - gocognit + - goconst + - gocritic + - gocyclo + - gosec + - thelper + - wrapcheck + - path: "(.*)?_example_test.go" + linters: + - gocritic + # Exclude shadow checking on the variable named err + - text: "shadow: declaration of \"(err|ok)\"" + linters: + - govet + # false positive + - path: language.go + text: "deprecatedComment: the proper format is `Deprecated: `" + # async + - path: handler.go + text: "Error return value of `conn.Notify` is not checked" + linters: + - errcheck + - path: log.go + text: "Error return value of `s.log.Write` is not checked" + linters: + - errcheck + - path: deprecated.go + linters: + - lll + - path: "(client|server)_json.go" + linters: + - nlreturn diff --git a/vendor/go.lsp.dev/protocol/LICENSE b/vendor/go.lsp.dev/protocol/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/protocol/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/go.lsp.dev/protocol/Makefile b/vendor/go.lsp.dev/protocol/Makefile new file mode 100644 index 00000000000..f08992fc093 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/Makefile @@ -0,0 +1,126 @@ +# ----------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL := test +comma := , +empty := +space := $(empty) $(empty) + +# ----------------------------------------------------------------------------- +# go + +GO_PATH ?= $(shell go env GOPATH) + +PKG := $(subst $(GO_PATH)/src/,,$(CURDIR)) +CGO_ENABLED ?= 0 +GO_BUILDTAGS=osusergo,netgo,static +GO_LDFLAGS=-s -w "-extldflags=-static" +GO_FLAGS ?= -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' -ldflags='${GO_LDFLAGS}' -installsuffix=netgo + +TOOLS_DIR := ${CURDIR}/tools +TOOLS_BIN := ${TOOLS_DIR}/bin +TOOLS := $(shell cd ${TOOLS_DIR} && go list -v -x -f '{{ join .Imports " " }}' -tags=tools) + +GO_PKGS := ./... + +GO_TEST ?= ${TOOLS_BIN}/gotestsum -- +GO_TEST_PKGS ?= $(shell go list -f='{{if or .TestGoFiles .XTestGoFiles}}{{.ImportPath}}{{end}}' ./...) +GO_TEST_FLAGS ?= -race -count=1 +GO_TEST_FUNC ?= . +GO_BENCH_FLAGS ?= -benchmem +GO_BENCH_FUNC ?= . +GO_LINT_FLAGS ?= + +# Set build environment +JOBS := $(shell getconf _NPROCESSORS_CONF) + +# ----------------------------------------------------------------------------- +# defines + +define target +@printf "+ $(patsubst ,$@,$(1))\\n" >&2 +endef + +# ----------------------------------------------------------------------------- +# target + +##@ test, bench, coverage + +export GOTESTSUM_FORMAT=standard-verbose + +.PHONY: test +test: CGO_ENABLED=1 +test: tools/bin/gotestsum ## Runs package test including race condition. + $(call target) + @CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -run=${GO_TEST_FUNC} -tags='$(subst $(space),$(comma),${GO_BUILDTAGS})' ${GO_TEST_PKGS} + +.PHONY: coverage +coverage: CGO_ENABLED=1 +coverage: tools/bin/gotestsum ## Takes packages test coverage. + $(call target) + CGO_ENABLED=${CGO_ENABLED} ${GO_TEST} ${GO_TEST_FLAGS} -covermode=atomic -coverpkg=./... -coverprofile=coverage.out $(strip ${GO_FLAGS}) ${GO_PKGS} + + +##@ fmt, lint + +.PHONY: lint +lint: fmt lint/golangci-lint ## Run all linters. + +.PHONY: fmt +fmt: tools/goimportz tools/gofumpt ## Run goimportz and gofumpt. + $(call target) + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/goimportz -local=${PKG},$(subst /protocol,,$(PKG)) -w + find . -iname "*.go" -not -path "./vendor/**" | xargs -P ${JOBS} ${TOOLS_BIN}/gofumpt -extra -w + +.PHONY: lint/golangci-lint +lint/golangci-lint: tools/golangci-lint .golangci.yml ## Run golangci-lint. + $(call target) + ${TOOLS_BIN}/golangci-lint -j ${JOBS} run $(strip ${GO_LINT_FLAGS}) ./... + + +##@ tools + +.PHONY: tools +tools: tools/bin/'' ## Install tools + +tools/%: ## install an individual dependent tool + @${MAKE} tools/bin/$* 1>/dev/null + +tools/bin/%: ${TOOLS_DIR}/go.mod ${TOOLS_DIR}/go.sum + @cd tools; \ + for t in ${TOOLS}; do \ + if [ -z '$*' ] || [ $$(basename $$t) = '$*' ]; then \ + echo "Install $$t ..." >&2; \ + GOBIN=${TOOLS_BIN} CGO_ENABLED=0 go install -mod=mod ${GO_FLAGS} "$${t}"; \ + fi \ + done + + +##@ clean + +.PHONY: clean +clean: ## Cleanups binaries and extra files in the package. + $(call target) + @rm -rf *.out *.test *.prof trace.txt ${TOOLS_BIN} + + +##@ miscellaneous + +.PHONY: todo +TODO: ## Print the all of (TODO|BUG|XXX|FIXME|NOTE) in packages. + @grep -E '(TODO|BUG|XXX|FIXME)(\(.+\):|:)' $(shell find . 
-type f -name '*.go' -and -not -iwholename '*vendor*') + +.PHONY: nolint +nolint: ## Print the all of //nolint:... pragma in packages. + @grep -E -C 3 '//nolint.+' $(shell find . -type f -name '*.go' -and -not -iwholename '*vendor*' -and -not -iwholename '*internal*') + +.PHONY: env/% +env/%: ## Print the value of MAKEFILE_VARIABLE. Use `make env/GO_FLAGS` or etc. + @echo $($*) + + +##@ help + +.PHONY: help +help: ## Show this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[33m\033[0m\n"} /^[a-zA-Z_0-9\/%_-]+:.*?##/ { printf " \033[1;32m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) diff --git a/vendor/go.lsp.dev/protocol/README.md b/vendor/go.lsp.dev/protocol/README.md new file mode 100644 index 00000000000..2f091def7ac --- /dev/null +++ b/vendor/go.lsp.dev/protocol/README.md @@ -0,0 +1,19 @@ +# protocol + +[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga] + +Package protocol implements Language Server Protocol specification in Go. + + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/protocol +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/protocol +[module]: https://github.com/go-language-server/protocol/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/protocol +[ga]: https://github.com/go-language-server/protocol + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/protocol/main.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/pkg-go-dev-badge +[module-badge]: https://img.shields.io/github/release/go-language-server/protocol.svg?color=007D9C&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45
LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIgMTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4=
[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/protocol/main?logo=codecov&style=for-the-badge
[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/protocol?useReferer&pixel
diff --git a/vendor/go.lsp.dev/protocol/base.go b/vendor/go.lsp.dev/protocol/base.go
new file mode 100644
index 00000000000..3cca2e9f74a
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/base.go
@@ -0,0 +1,96 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+)
+
+// CancelParams params of cancelRequest.
+type CancelParams struct {
+	// ID is the request id to cancel.
+	ID interface{} `json:"id"` // int32 | string
+}
+
+// ProgressParams params of the Progress notification.
+//
+// @since 3.15.0.
+type ProgressParams struct {
+	// Token is the progress token provided by the client or server.
+	Token ProgressToken `json:"token"`
+
+	// Value is the progress data.
+	Value interface{} `json:"value"`
+}
+
+// ProgressToken is the progress token provided by the client or server.
+//
+// @since 3.15.0.
+type ProgressToken struct {
+	name   string
+	number int32
+}
+
+// compile time check whether the ProgressToken implements the fmt.Formatter, fmt.Stringer, json.Marshaler and json.Unmarshaler interfaces.
+var (
+	_ fmt.Formatter    = (*ProgressToken)(nil)
+	_ fmt.Stringer     = (*ProgressToken)(nil)
+	_ json.Marshaler   = (*ProgressToken)(nil)
+	_ json.Unmarshaler = (*ProgressToken)(nil)
+)
+
+// NewProgressToken returns a new ProgressToken.
+func NewProgressToken(s string) *ProgressToken {
+	return &ProgressToken{name: s}
+}
+
+// NewNumberProgressToken returns a new number ProgressToken.
+func NewNumberProgressToken(n int32) *ProgressToken {
+	return &ProgressToken{number: n}
+}
+
+// Format writes the ProgressToken to the formatter.
+//
+// If the rune is q the representation is non-ambiguous,
+// string forms are quoted.
+func (v ProgressToken) Format(f fmt.State, r rune) {
+	const numF = `%d`
+	strF := `%s`
+	if r == 'q' {
+		strF = `%q`
+	}
+
+	switch {
+	case v.name != "":
+		fmt.Fprintf(f, strF, v.name)
+	default:
+		fmt.Fprintf(f, numF, v.number)
+	}
+}
+
+// String returns a string representation of the ProgressToken.
+func (v ProgressToken) String() string {
+	return fmt.Sprint(v)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (v *ProgressToken) MarshalJSON() ([]byte, error) {
+	if v.name != "" {
+		return json.Marshal(v.name)
+	}
+
+	return json.Marshal(v.number)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *ProgressToken) UnmarshalJSON(data []byte) error {
+	*v = ProgressToken{}
+	if err := json.Unmarshal(data, &v.number); err == nil {
+		return nil
+	}
+
+	return json.Unmarshal(data, &v.name)
+}
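A sketch of how the two token forms serialize. Note that json.Marshal needs the addressable form (&p) so the pointer-receiver MarshalJSON is used; the standard encoding/json stands in here for the API-compatible segmentio package used above:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	p := protocol.ProgressParams{
		Token: *protocol.NewProgressToken("build-1"),
		Value: "indexing",
	}
	data, _ := json.Marshal(&p)
	fmt.Println(string(data)) // {"token":"build-1","value":"indexing"}
}
```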
diff --git a/vendor/go.lsp.dev/protocol/basic.go b/vendor/go.lsp.dev/protocol/basic.go
new file mode 100644
index 00000000000..feb022fea4c
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/basic.go
@@ -0,0 +1,705 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"go.lsp.dev/uri"
+)
+
+// DocumentURI represents the URI of a document.
+//
+// Many of the interfaces contain fields that correspond to the URI of a document.
+// For clarity, the type of such a field is declared as a DocumentURI.
+// Over the wire, it will still be transferred as a string, but this guarantees
+// that the contents of that string can be parsed as a valid URI.
+type DocumentURI = uri.URI
+
+// URI is a tagging interface for normal non-document URIs.
+//
+// @since 3.16.0.
+type URI = uri.URI
+
+// EOL lists the recognized end-of-line sequences.
+var EOL = []string{"\n", "\r\n", "\r"}
+
+// Position represents a position in a text document, expressed as a zero-based line and zero-based character offset.
+//
+// The offsets are based on a UTF-16 string representation.
+// So for a string of the form "a𐐀b", the character offset of the character "a" is 0,
+// the character offset of "𐐀" is 1 and the character offset of "b" is 3 since 𐐀 is represented using two code
+// units in UTF-16.
+//
+// Positions are line end character agnostic. So you can not specify a position that
+// denotes "\r|\n" or "\n|" where "|" represents the character offset.
+//
+// Position is between two characters like an "insert" cursor in an editor.
+// Special values like for example "-1" to denote the end of a line are not supported.
+type Position struct {
+	// Line position in a document (zero-based).
+	//
+	// If a line number is greater than the number of lines in a document, it defaults back to the number of lines in
+	// the document.
+	// If a line number is negative, it defaults to 0.
+	Line uint32 `json:"line"`
+
+	// Character offset on a line in a document (zero-based).
+	//
+	// Assuming that the line is represented as a string, the Character value represents the gap between the
+	// "character" and "character + 1".
+	//
+	// If the character value is greater than the line length it defaults back to the line length.
+	// If the character value is negative, it defaults to 0.
+	Character uint32 `json:"character"`
+}
+
+// Range represents a range in a text document, expressed as (zero-based) start and end positions.
+//
+// A range is comparable to a selection in an editor. Therefore the end position is exclusive.
+// If you want to specify a range that contains a line including the line ending character(s) then use an end position
+// denoting the start of the next line.
+type Range struct {
+	// Start is the range's start position.
+	Start Position `json:"start"`
+
+	// End is the range's end position.
+	End Position `json:"end"`
+}
+
+// Location represents a location inside a resource, such as a line inside a text file.
+type Location struct {
+	URI   DocumentURI `json:"uri"`
+	Range Range       `json:"range"`
+}
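A sketch of the UTF-16 offset rule above: a Range covering the single character "𐐀" in the line "a𐐀b" spans two code units:

```go
package main

import (
	"fmt"

	"go.lsp.dev/protocol"
)

func main() {
	// "a" is at character 0; "𐐀" occupies code units 1 and 2; "b" starts at 3.
	r := protocol.Range{
		Start: protocol.Position{Line: 0, Character: 1},
		End:   protocol.Position{Line: 0, Character: 3}, // end is exclusive
	}
	fmt.Printf("%+v\n", r)
}
```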
+// LocationLink represents a link between a source and a target location.
+type LocationLink struct {
+	// OriginSelectionRange span of the origin of this link.
+	//
+	// Used as the underlined span for mouse interaction. Defaults to the word range at the mouse position.
+	OriginSelectionRange *Range `json:"originSelectionRange,omitempty"`
+
+	// TargetURI is the target resource identifier of this link.
+	TargetURI DocumentURI `json:"targetUri"`
+
+	// TargetRange is the full target range of this link.
+	//
+	// If the target for example is a symbol then target range is the range enclosing this symbol not including
+	// leading/trailing whitespace but everything else like comments.
+	//
+	// This information is typically used to highlight the range in the editor.
+	TargetRange Range `json:"targetRange"`
+
+	// TargetSelectionRange is the range that should be selected and revealed when this link is being followed,
+	// e.g. the name of a function.
+	//
+	// Must be contained by the TargetRange. See also DocumentSymbol#range
+	TargetSelectionRange Range `json:"targetSelectionRange"`
+}
+
+// Command represents a reference to a command. Provides a title which will be used to represent a command in the UI.
+//
+// Commands are identified by a string identifier.
+// The recommended way to handle commands is to implement their execution on the server side if the client and
+// server provides the corresponding capabilities.
+//
+// Alternatively the tool extension code could handle the command. The protocol currently doesn't specify
+// a set of well-known commands.
+type Command struct {
+	// Title of the command, like `save`.
+	Title string `json:"title"`
+
+	// Command is the identifier of the actual command handler.
+	Command string `json:"command"`
+
+	// Arguments that the command handler should be invoked with.
+	Arguments []interface{} `json:"arguments,omitempty"`
+}
+
+// TextEdit is a textual edit applicable to a text document.
+type TextEdit struct {
+	// Range is the range of the text document to be manipulated.
+	//
+	// To insert text into a document create a range where start == end.
+	Range Range `json:"range"`
+
+	// NewText is the string to be inserted. For delete operations use an
+	// empty string.
+	NewText string `json:"newText"`
+}
+
+// ChangeAnnotation is the additional information that describes document changes.
+//
+// @since 3.16.0.
+type ChangeAnnotation struct {
+	// Label a human-readable string describing the actual change.
+	// The string is rendered prominent in the user interface.
+	Label string `json:"label"`
+
+	// NeedsConfirmation is a flag which indicates that user confirmation is needed
+	// before applying the change.
+	NeedsConfirmation bool `json:"needsConfirmation,omitempty"`
+
+	// Description is a human-readable string which is rendered less prominent in
+	// the user interface.
+	Description string `json:"description,omitempty"`
+}
+
+// ChangeAnnotationIdentifier an identifier referring to a change annotation managed by a workspace
+// edit.
+//
+// @since 3.16.0.
+type ChangeAnnotationIdentifier string
+
+// AnnotatedTextEdit is a special text edit with an additional change annotation.
+//
+// @since 3.16.0.
+type AnnotatedTextEdit struct {
+	TextEdit
+
+	// AnnotationID is the actual annotation identifier.
+	AnnotationID ChangeAnnotationIdentifier `json:"annotationId"`
+}
+
+// TextDocumentEdit describes textual changes on a single text document.
+//
+// The TextDocument is referred to as a OptionalVersionedTextDocumentIdentifier to allow clients to check the
+// text document version before an edit is applied.
+//
+// TextDocumentEdit describes all changes on a version "Si" and after they are applied move the document to
+// version "Si+1".
+// So the creator of a TextDocumentEdit doesn't need to sort the array or do any kind of ordering. However the
+// edits must be non overlapping.
+type TextDocumentEdit struct {
+	// TextDocument is the text document to change.
+	TextDocument OptionalVersionedTextDocumentIdentifier `json:"textDocument"`
+
+	// Edits is the edits to be applied.
+	//
+	// @since 3.16.0 - support for AnnotatedTextEdit.
+	// This is guarded by the client capability Workspace.WorkspaceEdit.ChangeAnnotationSupport.
+	Edits []TextEdit `json:"edits"` // []TextEdit | []AnnotatedTextEdit
+}
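A sketch of the edit model, using the simple Changes form of WorkspaceEdit (defined later in this file); the path is illustrative, and uri.File is assumed from go.lsp.dev/uri, which this file already imports:

```go
package main

import (
	"go.lsp.dev/protocol"
	"go.lsp.dev/uri"
)

func main() {
	pos := protocol.Position{Line: 0, Character: 0}
	_ = protocol.WorkspaceEdit{
		Changes: map[protocol.DocumentURI][]protocol.TextEdit{
			uri.File("/tmp/main.go"): {{
				Range:   protocol.Range{Start: pos, End: pos}, // start == end: pure insertion
				NewText: "// edited by tool\n",
			}},
		},
	}
}
```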
+// ResourceOperationKind is the file event type.
+type ResourceOperationKind string
+
+const (
+	// CreateResourceOperation supports creating new files and folders.
+	CreateResourceOperation ResourceOperationKind = "create"
+
+	// RenameResourceOperation supports renaming existing files and folders.
+	RenameResourceOperation ResourceOperationKind = "rename"
+
+	// DeleteResourceOperation supports deleting existing files and folders.
+	DeleteResourceOperation ResourceOperationKind = "delete"
+)
+
+// CreateFileOptions represents options to create a file.
+type CreateFileOptions struct {
+	// Overwrite existing file. Overwrite wins over `ignoreIfExists`.
+	Overwrite bool `json:"overwrite,omitempty"`
+
+	// IgnoreIfExists ignore if exists.
+	IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
+}
+
+// CreateFile represents a create file operation.
+type CreateFile struct {
+	// Kind a create.
+	Kind ResourceOperationKind `json:"kind"` // should be `create`
+
+	// URI is the resource to create.
+	URI DocumentURI `json:"uri"`
+
+	// Options additional options.
+	Options *CreateFileOptions `json:"options,omitempty"`
+
+	// AnnotationID an optional annotation identifier describing the operation.
+	//
+	// @since 3.16.0.
+	AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
+}
+
+// RenameFileOptions represents rename file options.
+type RenameFileOptions struct {
+	// Overwrite target if existing. Overwrite wins over `ignoreIfExists`.
+	Overwrite bool `json:"overwrite,omitempty"`
+
+	// IgnoreIfExists ignores if target exists.
+	IgnoreIfExists bool `json:"ignoreIfExists,omitempty"`
+}
+
+// RenameFile represents a rename file operation.
+type RenameFile struct {
+	// Kind a rename.
+	Kind ResourceOperationKind `json:"kind"` // should be `rename`
+
+	// OldURI is the old (existing) location.
+	OldURI DocumentURI `json:"oldUri"`
+
+	// NewURI is the new location.
+	NewURI DocumentURI `json:"newUri"`
+
+	// Options rename options.
+	Options *RenameFileOptions `json:"options,omitempty"`
+
+	// AnnotationID an optional annotation identifier describing the operation.
+	//
+	// @since 3.16.0.
+	AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
+}
+
+// DeleteFileOptions represents delete file options.
+type DeleteFileOptions struct {
+	// Recursive delete the content recursively if a folder is denoted.
+	Recursive bool `json:"recursive,omitempty"`
+
+	// IgnoreIfNotExists ignore the operation if the file doesn't exist.
+	IgnoreIfNotExists bool `json:"ignoreIfNotExists,omitempty"`
+}
+
+// DeleteFile represents a delete file operation.
+type DeleteFile struct {
+	// Kind is a delete.
+	Kind ResourceOperationKind `json:"kind"` // should be `delete`
+
+	// URI is the file to delete.
+	URI DocumentURI `json:"uri"`
+
+	// Options delete options.
+	Options *DeleteFileOptions `json:"options,omitempty"`
+
+	// AnnotationID an optional annotation identifier describing the operation.
+	//
+	// @since 3.16.0.
+	AnnotationID ChangeAnnotationIdentifier `json:"annotationId,omitempty"`
+}
+
+// WorkspaceEdit represents changes to many resources managed in the workspace.
+//
+// The edit should either provide changes or documentChanges.
+// If the client can handle versioned document edits and if documentChanges are present, the latter are preferred over
+// changes.
+type WorkspaceEdit struct {
+	// Changes holds changes to existing resources.
+ Changes map[DocumentURI][]TextEdit `json:"changes,omitempty"`
+
+ // DocumentChanges depending on the client capability `workspace.workspaceEdit.resourceOperations` document changes
+ // are either an array of `TextDocumentEdit`s to express changes to n different text documents
+ // where each text document edit addresses a specific version of a text document. Or it can contain
+ // above `TextDocumentEdit`s mixed with create, rename and delete file / folder operations.
+ //
+ // Whether a client supports versioned document edits is expressed via
+ // `workspace.workspaceEdit.documentChanges` client capability.
+ //
+ // If a client neither supports `documentChanges` nor `workspace.workspaceEdit.resourceOperations` then
+ // only plain `TextEdit`s using the `changes` property are supported.
+ DocumentChanges []TextDocumentEdit `json:"documentChanges,omitempty"`
+
+ // ChangeAnnotations is a map of change annotations that can be referenced in
+ // "AnnotatedTextEdit"s or create, rename and delete file / folder
+ // operations.
+ //
+ // Whether clients honor this property depends on the client capability
+ // "workspace.changeAnnotationSupport".
+ //
+ // @since 3.16.0.
+ ChangeAnnotations map[ChangeAnnotationIdentifier]ChangeAnnotation `json:"changeAnnotations,omitempty"`
+}
+
+// TextDocumentIdentifier identifies a text document using a URI. On the protocol level, URIs are passed as strings.
+type TextDocumentIdentifier struct {
+ // URI is the text document's URI.
+ URI DocumentURI `json:"uri"`
+}
+
+// TextDocumentItem represents an item to transfer a text document from the client to the server.
+type TextDocumentItem struct {
+ // URI is the text document's URI.
+ URI DocumentURI `json:"uri"`
+
+ // LanguageID is the text document's language identifier.
+ LanguageID LanguageIdentifier `json:"languageId"`
+
+ // Version is the version number of this document (it will increase after each
+ // change, including undo/redo).
+ Version int32 `json:"version"`
+
+ // Text is the content of the opened text document.
+ Text string `json:"text"`
+}
+
+// LanguageIdentifier represents a text document's language identifier.
+type LanguageIdentifier string
+
+const (
+ // ABAPLanguage ABAP Language.
+ ABAPLanguage LanguageIdentifier = "abap"
+
+ // BatLanguage Windows Bat Language.
+ BatLanguage LanguageIdentifier = "bat"
+
+ // BibtexLanguage BibTeX Language.
+ BibtexLanguage LanguageIdentifier = "bibtex"
+
+ // ClojureLanguage Clojure Language.
+ ClojureLanguage LanguageIdentifier = "clojure"
+
+ // CoffeeScriptLanguage CoffeeScript Language.
+ CoffeeScriptLanguage LanguageIdentifier = "coffeescript"
+
+ // CLanguage C Language.
+ CLanguage LanguageIdentifier = "c"
+
+ // CppLanguage C++ Language.
+ CppLanguage LanguageIdentifier = "cpp"
+
+ // CsharpLanguage C# Language.
+ CsharpLanguage LanguageIdentifier = "csharp"
+
+ // CSSLanguage CSS Language.
+ CSSLanguage LanguageIdentifier = "css"
+
+ // DiffLanguage Diff Language.
+ DiffLanguage LanguageIdentifier = "diff"
+
+ // DartLanguage Dart Language.
+ DartLanguage LanguageIdentifier = "dart"
+
+ // DockerfileLanguage Dockerfile Language.
+ DockerfileLanguage LanguageIdentifier = "dockerfile"
+
+ // ElixirLanguage Elixir Language.
+ ElixirLanguage LanguageIdentifier = "elixir"
+
+ // ErlangLanguage Erlang Language.
+ ErlangLanguage LanguageIdentifier = "erlang"
+
+ // FsharpLanguage F# Language.
+ FsharpLanguage LanguageIdentifier = "fsharp"
+
+ // GitCommitLanguage Git Language.
+ GitCommitLanguage LanguageIdentifier = "git-commit" + + // GitRebaseLanguage Git Language. + GitRebaseLanguage LanguageIdentifier = "git-rebase" + + // GoLanguage Go Language. + GoLanguage LanguageIdentifier = "go" + + // GroovyLanguage Groovy Language. + GroovyLanguage LanguageIdentifier = "groovy" + + // HandlebarsLanguage Handlebars Language. + HandlebarsLanguage LanguageIdentifier = "handlebars" + + // HTMLLanguage HTML Language. + HTMLLanguage LanguageIdentifier = "html" + + // IniLanguage Ini Language. + IniLanguage LanguageIdentifier = "ini" + + // JavaLanguage Java Language. + JavaLanguage LanguageIdentifier = "java" + + // JavaScriptLanguage JavaScript Language. + JavaScriptLanguage LanguageIdentifier = "javascript" + + // JavaScriptReactLanguage JavaScript React Language. + JavaScriptReactLanguage LanguageIdentifier = "javascriptreact" + + // JSONLanguage JSON Language. + JSONLanguage LanguageIdentifier = "json" + + // LatexLanguage LaTeX Language. + LatexLanguage LanguageIdentifier = "latex" + + // LessLanguage Less Language. + LessLanguage LanguageIdentifier = "less" + + // LuaLanguage Lua Language. + LuaLanguage LanguageIdentifier = "lua" + + // MakefileLanguage Makefile Language. + MakefileLanguage LanguageIdentifier = "makefile" + + // MarkdownLanguage Markdown Language. + MarkdownLanguage LanguageIdentifier = "markdown" + + // ObjectiveCLanguage Objective-C Language. + ObjectiveCLanguage LanguageIdentifier = "objective-c" + + // ObjectiveCppLanguage Objective-C++ Language. + ObjectiveCppLanguage LanguageIdentifier = "objective-cpp" + + // PerlLanguage Perl Language. + PerlLanguage LanguageIdentifier = "perl" + + // Perl6Language Perl Language. + Perl6Language LanguageIdentifier = "perl6" + + // PHPLanguage PHP Language. + PHPLanguage LanguageIdentifier = "php" + + // PowershellLanguage Powershell Language. + PowershellLanguage LanguageIdentifier = "powershell" + + // JadeLanguage Pug Language. + JadeLanguage LanguageIdentifier = "jade" + + // PythonLanguage Python Language. + PythonLanguage LanguageIdentifier = "python" + + // RLanguage R Language. + RLanguage LanguageIdentifier = "r" + + // RazorLanguage Razor(cshtml) Language. + RazorLanguage LanguageIdentifier = "razor" + + // RubyLanguage Ruby Language. + RubyLanguage LanguageIdentifier = "ruby" + + // RustLanguage Rust Language. + RustLanguage LanguageIdentifier = "rust" + + // SCSSLanguage SCSS Languages syntax using curly brackets. + SCSSLanguage LanguageIdentifier = "scss" + + // SASSLanguage SCSS Languages indented syntax. + SASSLanguage LanguageIdentifier = "sass" + + // ScalaLanguage Scala Language. + ScalaLanguage LanguageIdentifier = "scala" + + // ShaderlabLanguage ShaderLab Language. + ShaderlabLanguage LanguageIdentifier = "shaderlab" + + // ShellscriptLanguage Shell Script (Bash) Language. + ShellscriptLanguage LanguageIdentifier = "shellscript" + + // SQLLanguage SQL Language. + SQLLanguage LanguageIdentifier = "sql" + + // SwiftLanguage Swift Language. + SwiftLanguage LanguageIdentifier = "swift" + + // TypeScriptLanguage TypeScript Language. + TypeScriptLanguage LanguageIdentifier = "typescript" + + // TypeScriptReactLanguage TypeScript React Language. + TypeScriptReactLanguage LanguageIdentifier = "typescriptreact" + + // TeXLanguage TeX Language. + TeXLanguage LanguageIdentifier = "tex" + + // VBLanguage Visual Basic Language. + VBLanguage LanguageIdentifier = "vb" + + // XMLLanguage XML Language. + XMLLanguage LanguageIdentifier = "xml" + + // XslLanguage XSL Language. 
+ XslLanguage LanguageIdentifier = "xsl" + + // YamlLanguage YAML Language. + YamlLanguage LanguageIdentifier = "yaml" +) + +// languageIdentifierMap map of LanguageIdentifiers. +var languageIdentifierMap = map[string]LanguageIdentifier{ + "abap": ABAPLanguage, + "bat": BatLanguage, + "bibtex": BibtexLanguage, + "clojure": ClojureLanguage, + "coffeescript": CoffeeScriptLanguage, + "c": CLanguage, + "cpp": CppLanguage, + "csharp": CsharpLanguage, + "css": CSSLanguage, + "diff": DiffLanguage, + "dart": DartLanguage, + "dockerfile": DockerfileLanguage, + "elixir": ElixirLanguage, + "erlang": ErlangLanguage, + "fsharp": FsharpLanguage, + "git-commit": GitCommitLanguage, + "git-rebase": GitRebaseLanguage, + "go": GoLanguage, + "groovy": GroovyLanguage, + "handlebars": HandlebarsLanguage, + "html": HTMLLanguage, + "ini": IniLanguage, + "java": JavaLanguage, + "javascript": JavaScriptLanguage, + "javascriptreact": JavaScriptReactLanguage, + "json": JSONLanguage, + "latex": LatexLanguage, + "less": LessLanguage, + "lua": LuaLanguage, + "makefile": MakefileLanguage, + "markdown": MarkdownLanguage, + "objective-c": ObjectiveCLanguage, + "objective-cpp": ObjectiveCppLanguage, + "perl": PerlLanguage, + "perl6": Perl6Language, + "php": PHPLanguage, + "powershell": PowershellLanguage, + "jade": JadeLanguage, + "python": PythonLanguage, + "r": RLanguage, + "razor": RazorLanguage, + "ruby": RubyLanguage, + "rust": RustLanguage, + "scss": SCSSLanguage, + "sass": SASSLanguage, + "scala": ScalaLanguage, + "shaderlab": ShaderlabLanguage, + "shellscript": ShellscriptLanguage, + "sql": SQLLanguage, + "swift": SwiftLanguage, + "typescript": TypeScriptLanguage, + "typescriptreact": TypeScriptReactLanguage, + "tex": TeXLanguage, + "vb": VBLanguage, + "xml": XMLLanguage, + "xsl": XslLanguage, + "yaml": YamlLanguage, +} + +// ToLanguageIdentifier converts ft to LanguageIdentifier. +func ToLanguageIdentifier(ft string) LanguageIdentifier { + langID, ok := languageIdentifierMap[ft] + if ok { + return langID + } + + return LanguageIdentifier(ft) +} + +// VersionedTextDocumentIdentifier represents an identifier to denote a specific version of a text document. +// +// This information usually flows from the client to the server. +type VersionedTextDocumentIdentifier struct { + TextDocumentIdentifier + + // Version is the version number of this document. + // + // The version number of a document will increase after each change, including + // undo/redo. The number doesn't need to be consecutive. + Version int32 `json:"version"` +} + +// OptionalVersionedTextDocumentIdentifier represents an identifier which optionally denotes a specific version of +// a text document. +// +// This information usually flows from the server to the client. +// +// @since 3.16.0. +type OptionalVersionedTextDocumentIdentifier struct { + TextDocumentIdentifier + + // Version is the version number of this document. If an optional versioned text document + // identifier is sent from the server to the client and the file is not + // open in the editor (the server has not received an open notification + // before) the server can send `null` to indicate that the version is + // known and the content on disk is the master (as specified with document + // content ownership). + // + // The version number of a document will increase after each change, + // including undo/redo. The number doesn't need to be consecutive. 
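+ //
+ // As an illustrative sketch (the URI is invented for this example), a nil
+ // Version marshals to the JSON `"version": null` described above:
+ //
+ //  OptionalVersionedTextDocumentIdentifier{
+ //   TextDocumentIdentifier: TextDocumentIdentifier{URI: "file:///example.go"},
+ //   Version:                nil,
+ //  }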
+ Version *int32 `json:"version"` // int32 | null
+}
+
+// TextDocumentPositionParams is a parameter literal used in requests to pass a text document and a position
+// inside that document.
+//
+// It is up to the client to decide how a selection is converted into a position when issuing a request for a text
+// document.
+//
+// The client can for example honor or ignore the selection direction to make LSP requests consistent with features
+// implemented internally.
+type TextDocumentPositionParams struct {
+ // TextDocument is the text document.
+ TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+ // Position is the position inside the text document.
+ Position Position `json:"position"`
+}
+
+// DocumentFilter denotes a document through properties like language, scheme or pattern.
+//
+// An example is a filter that applies to TypeScript files on disk.
+type DocumentFilter struct {
+ // Language a language id, like `typescript`.
+ Language string `json:"language,omitempty"`
+
+ // Scheme a URI scheme, like `file` or `untitled`.
+ Scheme string `json:"scheme,omitempty"`
+
+ // Pattern a glob pattern, like `*.{ts,js}`.
+ //
+ // Glob patterns can have the following syntax:
+ // "*"
+ // "*" to match one or more characters in a path segment
+ // "?"
+ // "?" to match on one character in a path segment
+ // "**"
+ // "**" to match any number of path segments, including none
+ // "{}"
+ // "{}" to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript and JavaScript files)
+ // "[]"
+ // "[]" to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+ // "[!...]"
+ // "[!...]" to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+ Pattern string `json:"pattern,omitempty"`
+}
+
+// DocumentSelector is the combination of one or more document filters.
+type DocumentSelector []*DocumentFilter
+
+// MarkupKind describes the content type that a client supports in various
+// result literals like `Hover`, `ParameterInfo` or `CompletionItem`.
+//
+// Please note that `MarkupKinds` must not start with a `$`. These kinds
+// are reserved for internal usage.
+type MarkupKind string
+
+const (
+ // PlainText is supported as a content format.
+ PlainText MarkupKind = "plaintext"
+
+ // Markdown is supported as a content format.
+ Markdown MarkupKind = "markdown"
+)
+
+// MarkupContent a `MarkupContent` literal represents a string value whose content is interpreted based on its
+// kind flag.
+//
+// Currently the protocol supports `plaintext` and `markdown` as markup kinds.
+//
+// If the kind is `markdown` then the value can contain fenced code blocks like in GitHub issues.
+// See https://help.github.com/articles/creating-and-highlighting-code-blocks/#syntax-highlighting
+//
+// Here is an example of how such a string can be constructed using JavaScript / TypeScript:
+//
+//  let markdown: MarkupContent = {
+//   kind: MarkupKind.Markdown,
+//   value: [
+//    '# Header',
+//    'Some text',
+//    '```typescript',
+//    'someCode();',
+//    '```'
+//   ].join('\n')
+//  };
+//
+// NOTE: clients might sanitize the returned markdown. A client could decide to
+// remove HTML from the markdown to avoid script execution.
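+//
+// For comparison, a rough Go equivalent of the example above (a sketch, not
+// part of the protocol) can be built with this package's own types:
+//
+//  markdown := MarkupContent{
+//   Kind:  Markdown,
+//   Value: "# Header\nSome text\n```typescript\nsomeCode();\n```",
+//  }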
+type MarkupContent struct { + // Kind is the type of the Markup + Kind MarkupKind `json:"kind"` + + // Value is the content itself + Value string `json:"value"` +} diff --git a/vendor/go.lsp.dev/protocol/callhierarchy.go b/vendor/go.lsp.dev/protocol/callhierarchy.go new file mode 100644 index 00000000000..eebb9e39787 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/callhierarchy.go @@ -0,0 +1,103 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +// CallHierarchy capabilities specific to the "textDocument/callHierarchy". +// +// @since 3.16.0. +type CallHierarchy struct { + // DynamicRegistration whether implementation supports dynamic registration. + // + // If this is set to "true" the client supports the new + // TextDocumentRegistrationOptions && StaticRegistrationOptions return + // value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// CallHierarchyPrepareParams params of CallHierarchyPrepare. +// +// @since 3.16.0. +type CallHierarchyPrepareParams struct { + TextDocumentPositionParams + WorkDoneProgressParams +} + +// CallHierarchyItem is the result of a "textDocument/prepareCallHierarchy" request. +// +// @since 3.16.0. +type CallHierarchyItem struct { + // name is the name of this item. + Name string `json:"name"` + + // Kind is the kind of this item. + Kind SymbolKind `json:"kind"` + + // Tags for this item. + Tags []SymbolTag `json:"tags,omitempty"` + + // Detail more detail for this item, e.g. the signature of a function. + Detail string `json:"detail,omitempty"` + + // URI is the resource identifier of this item. + URI DocumentURI `json:"uri"` + + // Range is the range enclosing this symbol not including leading/trailing whitespace + // but everything else, e.g. comments and code. + Range Range `json:"range"` + + // SelectionRange is the range that should be selected and revealed when this symbol is being + // picked, e.g. the name of a function. Must be contained by the + // Range. + SelectionRange Range `json:"selectionRange"` + + // Data is a data entry field that is preserved between a call hierarchy prepare and + // incoming calls or outgoing calls requests. + Data interface{} `json:"data,omitempty"` +} + +// CallHierarchyIncomingCallsParams params of CallHierarchyIncomingCalls. +// +// @since 3.16.0. +type CallHierarchyIncomingCallsParams struct { + WorkDoneProgressParams + PartialResultParams + + // Item is the IncomingCalls item. + Item CallHierarchyItem `json:"item"` +} + +// CallHierarchyIncomingCall is the result of a "callHierarchy/incomingCalls" request. +// +// @since 3.16.0. +type CallHierarchyIncomingCall struct { + // From is the item that makes the call. + From CallHierarchyItem `json:"from"` + + // FromRanges is the ranges at which the calls appear. This is relative to the caller + // denoted by From. + FromRanges []Range `json:"fromRanges"` +} + +// CallHierarchyOutgoingCallsParams params of CallHierarchyOutgoingCalls. +// +// @since 3.16.0. +type CallHierarchyOutgoingCallsParams struct { + WorkDoneProgressParams + PartialResultParams + + // Item is the OutgoingCalls item. + Item CallHierarchyItem `json:"item"` +} + +// CallHierarchyOutgoingCall is the result of a "callHierarchy/outgoingCalls" request. +// +// @since 3.16.0. +type CallHierarchyOutgoingCall struct { + // To is the item that is called. + To CallHierarchyItem `json:"to"` + + // FromRanges is the range at which this item is called. 
This is the range relative to + // the caller, e.g the item passed to "callHierarchy/outgoingCalls" request. + FromRanges []Range `json:"fromRanges"` +} diff --git a/vendor/go.lsp.dev/protocol/capabilities_client.go b/vendor/go.lsp.dev/protocol/capabilities_client.go new file mode 100644 index 00000000000..2d6bb74e3dc --- /dev/null +++ b/vendor/go.lsp.dev/protocol/capabilities_client.go @@ -0,0 +1,1061 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import "strconv" + +// ClientCapabilities now define capabilities for dynamic registration, workspace and text document features +// the client supports. +// +// The experimental can be used to pass experimental capabilities under development. +// +// For future compatibility a ClientCapabilities object literal can have more properties set than currently defined. +// Servers receiving a ClientCapabilities object literal with unknown properties should ignore these properties. +// +// A missing property should be interpreted as an absence of the capability. +// If a missing property normally defines sub properties, all missing sub properties should be interpreted +// as an absence of the corresponding capability. +type ClientCapabilities struct { + // Workspace specific client capabilities. + Workspace *WorkspaceClientCapabilities `json:"workspace,omitempty"` + + // TextDocument specific client capabilities. + TextDocument *TextDocumentClientCapabilities `json:"textDocument,omitempty"` + + // Window specific client capabilities. + Window *WindowClientCapabilities `json:"window,omitempty"` + + // General client capabilities. + // + // @since 3.16.0. + General *GeneralClientCapabilities `json:"general,omitempty"` + + // Experimental client capabilities. + Experimental interface{} `json:"experimental,omitempty"` +} + +// WorkspaceClientCapabilities Workspace specific client capabilities. +type WorkspaceClientCapabilities struct { + // The client supports applying batch edits to the workspace by supporting + // the request "workspace/applyEdit". + ApplyEdit bool `json:"applyEdit,omitempty"` + + // WorkspaceEdit capabilities specific to `WorkspaceEdit`s. + WorkspaceEdit *WorkspaceClientCapabilitiesWorkspaceEdit `json:"workspaceEdit,omitempty"` + + // DidChangeConfiguration capabilities specific to the `workspace/didChangeConfiguration` notification. + DidChangeConfiguration *DidChangeConfigurationWorkspaceClientCapabilities `json:"didChangeConfiguration,omitempty"` + + // DidChangeWatchedFiles capabilities specific to the `workspace/didChangeWatchedFiles` notification. + DidChangeWatchedFiles *DidChangeWatchedFilesWorkspaceClientCapabilities `json:"didChangeWatchedFiles,omitempty"` + + // Symbol capabilities specific to the "workspace/symbol" request. + Symbol *WorkspaceSymbolClientCapabilities `json:"symbol,omitempty"` + + // ExecuteCommand capabilities specific to the "workspace/executeCommand" request. + ExecuteCommand *ExecuteCommandClientCapabilities `json:"executeCommand,omitempty"` + + // WorkspaceFolders is the client has support for workspace folders. + // + // @since 3.6.0. + WorkspaceFolders bool `json:"workspaceFolders,omitempty"` + + // Configuration is the client supports "workspace/configuration" requests. + // + // @since 3.6.0. + Configuration bool `json:"configuration,omitempty"` + + // SemanticTokens is the capabilities specific to the semantic token requests scoped to the + // workspace. + // + // @since 3.16.0. 
+ SemanticTokens *SemanticTokensWorkspaceClientCapabilities `json:"semanticTokens,omitempty"` + + // CodeLens is the Capabilities specific to the code lens requests scoped to the + // workspace. + // + // @since 3.16.0. + CodeLens *CodeLensWorkspaceClientCapabilities `json:"codeLens,omitempty"` + + // FileOperations is the client has support for file requests/notifications. + // + // @since 3.16.0. + FileOperations *WorkspaceClientCapabilitiesFileOperations `json:"fileOperations,omitempty"` +} + +// WorkspaceClientCapabilitiesWorkspaceEdit capabilities specific to "WorkspaceEdit"s. +type WorkspaceClientCapabilitiesWorkspaceEdit struct { + // DocumentChanges is the client supports versioned document changes in `WorkspaceEdit`s + DocumentChanges bool `json:"documentChanges,omitempty"` + + // FailureHandling is the failure handling strategy of a client if applying the workspace edit + // fails. + // + // Mostly FailureHandlingKind. + FailureHandling string `json:"failureHandling,omitempty"` + + // ResourceOperations is the resource operations the client supports. Clients should at least + // support "create", "rename" and "delete" files and folders. + ResourceOperations []string `json:"resourceOperations,omitempty"` + + // NormalizesLineEndings whether the client normalizes line endings to the client specific + // setting. + // If set to `true` the client will normalize line ending characters + // in a workspace edit to the client specific new line character(s). + // + // @since 3.16.0. + NormalizesLineEndings bool `json:"normalizesLineEndings,omitempty"` + + // ChangeAnnotationSupport whether the client in general supports change annotations on text edits, + // create file, rename file and delete file changes. + // + // @since 3.16.0. + ChangeAnnotationSupport *WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport `json:"changeAnnotationSupport,omitempty"` +} + +// FailureHandlingKind is the kind of failure handling . +type FailureHandlingKind string + +const ( + // FailureHandlingKindAbort applying the workspace change is simply aborted if one of the changes provided + // fails. All operations executed before the failing operation stay executed. + FailureHandlingKindAbort FailureHandlingKind = "abort" + + // FailureHandlingKindTransactional all operations are executed transactional. That means they either all + // succeed or no changes at all are applied to the workspace. + FailureHandlingKindTransactional FailureHandlingKind = "transactional" + + // FailureHandlingKindTextOnlyTransactional if the workspace edit contains only textual file changes they are executed transactional. + // If resource changes (create, rename or delete file) are part of the change the failure + // handling strategy is abort. + FailureHandlingKindTextOnlyTransactional FailureHandlingKind = "textOnlyTransactional" + + // FailureHandlingKindUndo the client tries to undo the operations already executed. But there is no + // guarantee that this is succeeding. + FailureHandlingKindUndo FailureHandlingKind = "undo" +) + +// WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport is the ChangeAnnotationSupport of WorkspaceClientCapabilitiesWorkspaceEdit. +// +// @since 3.16.0. +type WorkspaceClientCapabilitiesWorkspaceEditChangeAnnotationSupport struct { + // GroupsOnLabel whether the client groups edits with equal labels into tree nodes, + // for instance all edits labeled with "Changes in Strings" would + // be a tree node. 
+ GroupsOnLabel bool `json:"groupsOnLabel,omitempty"`
+}
+
+// DidChangeConfigurationWorkspaceClientCapabilities capabilities specific to the "workspace/didChangeConfiguration" notification.
+//
+// @since 3.16.0.
+type DidChangeConfigurationWorkspaceClientCapabilities struct {
+ // DynamicRegistration whether the did change configuration notification supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DidChangeWatchedFilesWorkspaceClientCapabilities capabilities specific to the "workspace/didChangeWatchedFiles" notification.
+//
+// @since 3.16.0.
+type DidChangeWatchedFilesWorkspaceClientCapabilities struct {
+ // DynamicRegistration whether the did change watched files notification supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// WorkspaceSymbolClientCapabilities capabilities specific to the `workspace/symbol` request.
+//
+// The workspace symbol request is sent from the client to the server to
+// list project-wide symbols matching the query string.
+type WorkspaceSymbolClientCapabilities struct {
+ // DynamicRegistration is the Symbol request supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // SymbolKind is the specific capabilities for the `SymbolKind` in the "workspace/symbol" request.
+ SymbolKind *SymbolKindCapabilities `json:"symbolKind,omitempty"`
+
+ // TagSupport is the client supports tags on `SymbolInformation`.
+ // Clients supporting tags have to handle unknown tags gracefully.
+ //
+ // @since 3.16.0
+ TagSupport *TagSupportCapabilities `json:"tagSupport,omitempty"`
+}
+
+type SymbolKindCapabilities struct {
+ // ValueSet is the symbol kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ //
+ // If this property is not present the client only supports
+ // the symbol kinds from `File` to `Array` as defined in
+ // the initial version of the protocol.
+ ValueSet []SymbolKind `json:"valueSet,omitempty"`
+}
+
+type TagSupportCapabilities struct {
+ // ValueSet is the tags supported by the client.
+ ValueSet []SymbolTag `json:"valueSet,omitempty"`
+}
+
+// ExecuteCommandClientCapabilities capabilities specific to the "workspace/executeCommand" request.
+type ExecuteCommandClientCapabilities struct {
+ // DynamicRegistration Execute command supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// SemanticTokensWorkspaceClientCapabilities capabilities specific to the "workspace/semanticToken" request.
+//
+// @since 3.16.0.
+type SemanticTokensWorkspaceClientCapabilities struct {
+ // RefreshSupport whether the client implementation supports a refresh request sent from
+ // the server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // semantic tokens currently shown. It should be used with absolute care
+ // and is useful for situations where a server, for example, detects a
+ // project-wide change that requires such a recalculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// CodeLensWorkspaceClientCapabilities capabilities specific to the "workspace/codeLens" request.
+//
+// @since 3.16.0.
+type CodeLensWorkspaceClientCapabilities struct {
+ // RefreshSupport whether the client implementation supports a refresh request sent from the
+ // server to the client.
+ //
+ // Note that this event is global and will force the client to refresh all
+ // code lenses currently shown. It should be used with absolute care and is
+ // useful for situations where a server, for example, detects a project-wide
+ // change that requires such a recalculation.
+ RefreshSupport bool `json:"refreshSupport,omitempty"`
+}
+
+// WorkspaceClientCapabilitiesFileOperations capabilities specific to the fileOperations.
+//
+// @since 3.16.0.
+type WorkspaceClientCapabilitiesFileOperations struct {
+ // DynamicRegistration whether the client supports dynamic registration for file
+ // requests/notifications.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // DidCreate is the client has support for sending didCreateFiles notifications.
+ DidCreate bool `json:"didCreate,omitempty"`
+
+ // WillCreate is the client has support for sending willCreateFiles requests.
+ WillCreate bool `json:"willCreate,omitempty"`
+
+ // DidRename is the client has support for sending didRenameFiles notifications.
+ DidRename bool `json:"didRename,omitempty"`
+
+ // WillRename is the client has support for sending willRenameFiles requests.
+ WillRename bool `json:"willRename,omitempty"`
+
+ // DidDelete is the client has support for sending didDeleteFiles notifications.
+ DidDelete bool `json:"didDelete,omitempty"`
+
+ // WillDelete is the client has support for sending willDeleteFiles requests.
+ WillDelete bool `json:"willDelete,omitempty"`
+}
+
+// TextDocumentClientCapabilities Text document specific client capabilities.
+type TextDocumentClientCapabilities struct {
+ // Synchronization defines which synchronization capabilities the client supports.
+ Synchronization *TextDocumentSyncClientCapabilities `json:"synchronization,omitempty"`
+
+ // Completion Capabilities specific to the "textDocument/completion".
+ Completion *CompletionTextDocumentClientCapabilities `json:"completion,omitempty"`
+
+ // Hover capabilities specific to the "textDocument/hover".
+ Hover *HoverTextDocumentClientCapabilities `json:"hover,omitempty"`
+
+ // SignatureHelp capabilities specific to the "textDocument/signatureHelp".
+ SignatureHelp *SignatureHelpTextDocumentClientCapabilities `json:"signatureHelp,omitempty"`
+
+ // Declaration capabilities specific to the "textDocument/declaration".
+ Declaration *DeclarationTextDocumentClientCapabilities `json:"declaration,omitempty"`
+
+ // Definition capabilities specific to the "textDocument/definition".
+ //
+ // @since 3.14.0.
+ Definition *DefinitionTextDocumentClientCapabilities `json:"definition,omitempty"`
+
+ // TypeDefinition capabilities specific to the "textDocument/typeDefinition".
+ //
+ // @since 3.6.0.
+ TypeDefinition *TypeDefinitionTextDocumentClientCapabilities `json:"typeDefinition,omitempty"`
+
+ // Implementation capabilities specific to the "textDocument/implementation".
+ //
+ // @since 3.6.0.
+ Implementation *ImplementationTextDocumentClientCapabilities `json:"implementation,omitempty"`
+
+ // References capabilities specific to the "textDocument/references".
+ References *ReferencesTextDocumentClientCapabilities `json:"references,omitempty"`
+
+ // DocumentHighlight capabilities specific to the "textDocument/documentHighlight".
+ DocumentHighlight *DocumentHighlightClientCapabilities `json:"documentHighlight,omitempty"` + + // DocumentSymbol capabilities specific to the "textDocument/documentSymbol". + DocumentSymbol *DocumentSymbolClientCapabilities `json:"documentSymbol,omitempty"` + + // CodeAction capabilities specific to the "textDocument/codeAction". + CodeAction *CodeActionClientCapabilities `json:"codeAction,omitempty"` + + // CodeLens capabilities specific to the "textDocument/codeLens". + CodeLens *CodeLensClientCapabilities `json:"codeLens,omitempty"` + + // DocumentLink capabilities specific to the "textDocument/documentLink". + DocumentLink *DocumentLinkClientCapabilities `json:"documentLink,omitempty"` + + // ColorProvider capabilities specific to the "textDocument/documentColor" and the + // "textDocument/colorPresentation" request. + // + // @since 3.6.0. + ColorProvider *DocumentColorClientCapabilities `json:"colorProvider,omitempty"` + + // Formatting Capabilities specific to the "textDocument/formatting" request. + Formatting *DocumentFormattingClientCapabilities `json:"formatting,omitempty"` + + // RangeFormatting Capabilities specific to the "textDocument/rangeFormatting" request. + RangeFormatting *DocumentRangeFormattingClientCapabilities `json:"rangeFormatting,omitempty"` + + // OnTypeFormatting Capabilities specific to the "textDocument/onTypeFormatting" request. + OnTypeFormatting *DocumentOnTypeFormattingClientCapabilities `json:"onTypeFormatting,omitempty"` + + // PublishDiagnostics capabilities specific to "textDocument/publishDiagnostics". + PublishDiagnostics *PublishDiagnosticsClientCapabilities `json:"publishDiagnostics,omitempty"` + + // Rename capabilities specific to the "textDocument/rename". + Rename *RenameClientCapabilities `json:"rename,omitempty"` + + // FoldingRange capabilities specific to "textDocument/foldingRange" requests. + // + // @since 3.10.0. + FoldingRange *FoldingRangeClientCapabilities `json:"foldingRange,omitempty"` + + // SelectionRange capabilities specific to "textDocument/selectionRange" requests. + // + // @since 3.15.0. + SelectionRange *SelectionRangeClientCapabilities `json:"selectionRange,omitempty"` + + // CallHierarchy capabilities specific to the various call hierarchy requests. + // + // @since 3.16.0. + CallHierarchy *CallHierarchyClientCapabilities `json:"callHierarchy,omitempty"` + + // SemanticTokens capabilities specific to the various semantic token requests. + // + // @since 3.16.0. + SemanticTokens *SemanticTokensClientCapabilities `json:"semanticTokens,omitempty"` + + // LinkedEditingRange capabilities specific to the "textDocument/linkedEditingRange" request. + // + // @since 3.16.0. + LinkedEditingRange *LinkedEditingRangeClientCapabilities `json:"linkedEditingRange,omitempty"` + + // Moniker capabilities specific to the "textDocument/moniker" request. + // + // @since 3.16.0. + Moniker *MonikerClientCapabilities `json:"moniker,omitempty"` +} + +// TextDocumentSyncClientCapabilities defines which synchronization capabilities the client supports. +type TextDocumentSyncClientCapabilities struct { + // DynamicRegistration whether text document synchronization supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // WillSave is the client supports sending will save notifications. 
+ WillSave bool `json:"willSave,omitempty"` + + // WillSaveWaitUntil is the client supports sending a will save request and + // waits for a response providing text edits which will + // be applied to the document before it is saved. + WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"` + + // DidSave is the client supports did save notifications. + DidSave bool `json:"didSave,omitempty"` +} + +// CompletionTextDocumentClientCapabilities Capabilities specific to the "textDocument/completion". +type CompletionTextDocumentClientCapabilities struct { + // Whether completion supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // The client supports the following `CompletionItem` specific + // capabilities. + CompletionItem *CompletionTextDocumentClientCapabilitiesItem `json:"completionItem,omitempty"` + + CompletionItemKind *CompletionTextDocumentClientCapabilitiesItemKind `json:"completionItemKind,omitempty"` + + // ContextSupport is the client supports to send additional context information for a + // `textDocument/completion` request. + ContextSupport bool `json:"contextSupport,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItem is the client supports the following "CompletionItem" specific +// capabilities. +type CompletionTextDocumentClientCapabilitiesItem struct { + // SnippetSupport client supports snippets as insert text. + // + // A snippet can define tab stops and placeholders with `$1`, `$2` + // and `${3:foo}`. `$0` defines the final tab stop, it defaults to + // the end of the snippet. Placeholders with equal identifiers are linked, + // that is typing in one will update others too. + SnippetSupport bool `json:"snippetSupport,omitempty"` + + // CommitCharactersSupport client supports commit characters on a completion item. + CommitCharactersSupport bool `json:"commitCharactersSupport,omitempty"` + + // DocumentationFormat client supports the follow content formats for the documentation + // property. The order describes the preferred format of the client. + DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` + + // DeprecatedSupport client supports the deprecated property on a completion item. + DeprecatedSupport bool `json:"deprecatedSupport,omitempty"` + + // PreselectSupport client supports the preselect property on a completion item. + PreselectSupport bool `json:"preselectSupport,omitempty"` + + // TagSupport is the client supports the tag property on a completion item. + // + // Clients supporting tags have to handle unknown tags gracefully. + // Clients especially need to preserve unknown tags when sending + // a completion item back to the server in a resolve call. + // + // @since 3.15.0. + TagSupport *CompletionTextDocumentClientCapabilitiesItemTagSupport `json:"tagSupport,omitempty"` + + // InsertReplaceSupport client supports insert replace edit to control different behavior if + // a completion item is inserted in the text or should replace text. + // + // @since 3.16.0. + InsertReplaceSupport bool `json:"insertReplaceSupport,omitempty"` + + // ResolveSupport indicates which properties a client can resolve lazily on a + // completion item. Before version 3.16.0 only the predefined properties + // `documentation` and `details` could be resolved lazily. + // + // @since 3.16.0. 
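+ //
+ // As an illustrative sketch, a client that lazily resolves the
+ // `documentation` and `additionalTextEdits` properties would advertise:
+ //
+ //  &CompletionTextDocumentClientCapabilitiesItemResolveSupport{
+ //   Properties: []string{"documentation", "additionalTextEdits"},
+ //  }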
+ ResolveSupport *CompletionTextDocumentClientCapabilitiesItemResolveSupport `json:"resolveSupport,omitempty"` + + // InsertTextModeSupport is the client supports the `insertTextMode` property on + // a completion item to override the whitespace handling mode + // as defined by the client (see `insertTextMode`). + // + // @since 3.16.0. + InsertTextModeSupport *CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport `json:"insertTextModeSupport,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemTagSupport specific capabilities for the "TagSupport" in the "textDocument/completion" request. +// +// @since 3.15.0. +type CompletionTextDocumentClientCapabilitiesItemTagSupport struct { + // ValueSet is the tags supported by the client. + // + // @since 3.15.0. + ValueSet []CompletionItemTag `json:"valueSet,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemResolveSupport specific capabilities for the ResolveSupport in the CompletionTextDocumentClientCapabilitiesItem. +// +// @since 3.16.0. +type CompletionTextDocumentClientCapabilitiesItemResolveSupport struct { + // Properties is the properties that a client can resolve lazily. + Properties []string `json:"properties"` +} + +// CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport specific capabilities for the InsertTextModeSupport in the CompletionTextDocumentClientCapabilitiesItem. +// +// @since 3.16.0. +type CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport struct { + // ValueSet is the tags supported by the client. + // + // @since 3.16.0. + ValueSet []InsertTextMode `json:"valueSet,omitempty"` +} + +// CompletionTextDocumentClientCapabilitiesItemKind specific capabilities for the "CompletionItemKind" in the "textDocument/completion" request. +type CompletionTextDocumentClientCapabilitiesItemKind struct { + // The completion item kind values the client supports. When this + // property exists the client also guarantees that it will + // handle values outside its set gracefully and falls back + // to a default value when unknown. + // + // If this property is not present the client only supports + // the completion items kinds from `Text` to `Reference` as defined in + // the initial version of the protocol. + // + ValueSet []CompletionItemKind `json:"valueSet,omitempty"` +} + +// HoverTextDocumentClientCapabilities capabilities specific to the "textDocument/hover". +type HoverTextDocumentClientCapabilities struct { + // DynamicRegistration whether hover supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // ContentFormat is the client supports the follow content formats for the content + // property. The order describes the preferred format of the client. + ContentFormat []MarkupKind `json:"contentFormat,omitempty"` +} + +// SignatureHelpTextDocumentClientCapabilities capabilities specific to the "textDocument/signatureHelp". +type SignatureHelpTextDocumentClientCapabilities struct { + // DynamicRegistration whether signature help supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // SignatureInformation is the client supports the following "SignatureInformation" + // specific properties. + SignatureInformation *TextDocumentClientCapabilitiesSignatureInformation `json:"signatureInformation,omitempty"` + + // ContextSupport is the client supports to send additional context information for a "textDocument/signatureHelp" request. 
+ // + // A client that opts into contextSupport will also support the "retriggerCharacters" on "SignatureHelpOptions". + // + // @since 3.15.0. + ContextSupport bool `json:"contextSupport,omitempty"` +} + +// TextDocumentClientCapabilitiesSignatureInformation is the client supports the following "SignatureInformation" +// specific properties. +type TextDocumentClientCapabilitiesSignatureInformation struct { + // DocumentationFormat is the client supports the follow content formats for the documentation + // property. The order describes the preferred format of the client. + DocumentationFormat []MarkupKind `json:"documentationFormat,omitempty"` + + // ParameterInformation is the Client capabilities specific to parameter information. + ParameterInformation *TextDocumentClientCapabilitiesParameterInformation `json:"parameterInformation,omitempty"` + + // ActiveParameterSupport is the client supports the `activeParameter` property on + // `SignatureInformation` literal. + // + // @since 3.16.0. + ActiveParameterSupport bool `json:"activeParameterSupport,omitempty"` +} + +// TextDocumentClientCapabilitiesParameterInformation is the client capabilities specific to parameter information. +type TextDocumentClientCapabilitiesParameterInformation struct { + // LabelOffsetSupport is the client supports processing label offsets instead of a + // simple label string. + // + // @since 3.14.0. + LabelOffsetSupport bool `json:"labelOffsetSupport,omitempty"` +} + +// DeclarationTextDocumentClientCapabilities capabilities specific to the "textDocument/declaration". +type DeclarationTextDocumentClientCapabilities struct { + // DynamicRegistration whether declaration supports dynamic registration. If this is set to `true` + // the client supports the new `(TextDocumentRegistrationOptions & StaticRegistrationOptions)` + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of declaration links. + // + // @since 3.14.0. + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// DefinitionTextDocumentClientCapabilities capabilities specific to the "textDocument/definition". +// +// @since 3.14.0. +type DefinitionTextDocumentClientCapabilities struct { + // DynamicRegistration whether definition supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// TypeDefinitionTextDocumentClientCapabilities capabilities specific to the "textDocument/typeDefinition". +// +// @since 3.6.0. +type TypeDefinitionTextDocumentClientCapabilities struct { + // DynamicRegistration whether typeDefinition supports dynamic registration. If this is set to `true` + // the client supports the new "(TextDocumentRegistrationOptions & StaticRegistrationOptions)" + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + // + // @since 3.14.0 + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// ImplementationTextDocumentClientCapabilities capabilities specific to the "textDocument/implementation". +// +// @since 3.6.0. 
+type ImplementationTextDocumentClientCapabilities struct { + // DynamicRegistration whether implementation supports dynamic registration. If this is set to `true` + // the client supports the new "(TextDocumentRegistrationOptions & StaticRegistrationOptions)" + // return value for the corresponding server capability as well. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // LinkSupport is the client supports additional metadata in the form of definition links. + // + // @since 3.14.0 + LinkSupport bool `json:"linkSupport,omitempty"` +} + +// ReferencesTextDocumentClientCapabilities capabilities specific to the "textDocument/references". +type ReferencesTextDocumentClientCapabilities struct { + // DynamicRegistration whether references supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// DocumentHighlightClientCapabilities capabilities specific to the "textDocument/documentHighlight". +type DocumentHighlightClientCapabilities struct { + // DynamicRegistration Whether document highlight supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` +} + +// DocumentSymbolClientCapabilities capabilities specific to the "textDocument/documentSymbol". +type DocumentSymbolClientCapabilities struct { + // DynamicRegistration whether document symbol supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // SymbolKind specific capabilities for the "SymbolKindCapabilities". + SymbolKind *SymbolKindCapabilities `json:"symbolKind,omitempty"` + + // HierarchicalDocumentSymbolSupport is the client support hierarchical document symbols. + HierarchicalDocumentSymbolSupport bool `json:"hierarchicalDocumentSymbolSupport,omitempty"` + + // TagSupport is the client supports tags on "SymbolInformation". Tags are supported on + // "DocumentSymbol" if "HierarchicalDocumentSymbolSupport" is set to true. + // Clients supporting tags have to handle unknown tags gracefully. + // + // @since 3.16.0. + TagSupport *DocumentSymbolClientCapabilitiesTagSupport `json:"tagSupport,omitempty"` + + // LabelSupport is the client supports an additional label presented in the UI when + // registering a document symbol provider. + // + // @since 3.16.0. + LabelSupport bool `json:"labelSupport,omitempty"` +} + +// DocumentSymbolClientCapabilitiesTagSupport TagSupport in the DocumentSymbolClientCapabilities. +// +// @since 3.16.0. +type DocumentSymbolClientCapabilitiesTagSupport struct { + // ValueSet is the tags supported by the client. + ValueSet []SymbolTag `json:"valueSet"` +} + +// CodeActionClientCapabilities capabilities specific to the "textDocument/codeAction". +type CodeActionClientCapabilities struct { + // DynamicRegistration whether code action supports dynamic registration. + DynamicRegistration bool `json:"dynamicRegistration,omitempty"` + + // CodeActionLiteralSupport is the client support code action literals as a valid + // response of the "textDocument/codeAction" request. + // + // @since 3.8.0 + CodeActionLiteralSupport *CodeActionClientCapabilitiesLiteralSupport `json:"codeActionLiteralSupport,omitempty"` + + // IsPreferredSupport whether code action supports the "isPreferred" property. + // + // @since 3.15.0. + IsPreferredSupport bool `json:"isPreferredSupport,omitempty"` + + // DisabledSupport whether code action supports the `disabled` property. + // + // @since 3.16.0. 
+ DisabledSupport bool `json:"disabledSupport,omitempty"`
+
+ // DataSupport whether code action supports the `data` property which is
+ // preserved between a `textDocument/codeAction` and a
+ // `codeAction/resolve` request.
+ //
+ // @since 3.16.0.
+ DataSupport bool `json:"dataSupport,omitempty"`
+
+ // ResolveSupport whether the client supports resolving additional code action
+ // properties via a separate `codeAction/resolve` request.
+ //
+ // @since 3.16.0.
+ ResolveSupport *CodeActionClientCapabilitiesResolveSupport `json:"resolveSupport,omitempty"`
+
+ // HonorsChangeAnnotations whether the client honors the change annotations in
+ // text edits and resource operations returned via the
+ // `CodeAction#edit` property by for example presenting
+ // the workspace edit in the user interface and asking
+ // for confirmation.
+ //
+ // @since 3.16.0.
+ HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// CodeActionClientCapabilitiesLiteralSupport indicates the client supports code action literals as a valid response of the "textDocument/codeAction" request.
+type CodeActionClientCapabilitiesLiteralSupport struct {
+ // CodeActionKind is the code action kind supported with the following value
+ // set.
+ CodeActionKind *CodeActionClientCapabilitiesKind `json:"codeActionKind"`
+}
+
+// CodeActionClientCapabilitiesKind is the code action kind supported with the following value set.
+type CodeActionClientCapabilitiesKind struct {
+ // ValueSet is the code action kind values the client supports. When this
+ // property exists the client also guarantees that it will
+ // handle values outside its set gracefully and falls back
+ // to a default value when unknown.
+ ValueSet []CodeActionKind `json:"valueSet"`
+}
+
+// CodeActionClientCapabilitiesResolveSupport ResolveSupport in the CodeActionClientCapabilities.
+//
+// @since 3.16.0.
+type CodeActionClientCapabilitiesResolveSupport struct {
+ // Properties is the properties that a client can resolve lazily.
+ Properties []string `json:"properties"`
+}
+
+// CodeLensClientCapabilities capabilities specific to the "textDocument/codeLens".
+type CodeLensClientCapabilities struct {
+ // DynamicRegistration Whether code lens supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentLinkClientCapabilities capabilities specific to the "textDocument/documentLink".
+type DocumentLinkClientCapabilities struct {
+ // DynamicRegistration whether document link supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // TooltipSupport whether the client supports the "tooltip" property on "DocumentLink".
+ //
+ // @since 3.15.0.
+ TooltipSupport bool `json:"tooltipSupport,omitempty"`
+}
+
+// DocumentColorClientCapabilities capabilities specific to the "textDocument/documentColor" and the
+// "textDocument/colorPresentation" request.
+//
+// @since 3.6.0.
+type DocumentColorClientCapabilities struct {
+ // DynamicRegistration whether colorProvider supports dynamic registration. If this is set to `true`
+ // the client supports the new "(ColorProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentFormattingClientCapabilities capabilities specific to the "textDocument/formatting".
+type DocumentFormattingClientCapabilities struct {
+ // DynamicRegistration whether formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentRangeFormattingClientCapabilities capabilities specific to the "textDocument/rangeFormatting".
+type DocumentRangeFormattingClientCapabilities struct {
+ // DynamicRegistration whether range formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// DocumentOnTypeFormattingClientCapabilities capabilities specific to the "textDocument/onTypeFormatting".
+type DocumentOnTypeFormattingClientCapabilities struct {
+ // DynamicRegistration whether on type formatting supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// PublishDiagnosticsClientCapabilities capabilities specific to "textDocument/publishDiagnostics".
+type PublishDiagnosticsClientCapabilities struct {
+ // RelatedInformation whether the client accepts diagnostics with related information.
+ RelatedInformation bool `json:"relatedInformation,omitempty"`
+
+ // TagSupport clients supporting tags have to handle unknown tags gracefully.
+ //
+ // @since 3.15.0.
+ TagSupport *PublishDiagnosticsClientCapabilitiesTagSupport `json:"tagSupport,omitempty"`
+
+ // VersionSupport whether the client interprets the version property of the
+ // "textDocument/publishDiagnostics" notification's parameter.
+ //
+ // @since 3.15.0.
+ VersionSupport bool `json:"versionSupport,omitempty"`
+
+ // CodeDescriptionSupport client supports a codeDescription property.
+ //
+ // @since 3.16.0.
+ CodeDescriptionSupport bool `json:"codeDescriptionSupport,omitempty"`
+
+ // DataSupport whether code action supports the `data` property which is
+ // preserved between a `textDocument/publishDiagnostics` and
+ // `textDocument/codeAction` request.
+ //
+ // @since 3.16.0.
+ DataSupport bool `json:"dataSupport,omitempty"`
+}
+
+// PublishDiagnosticsClientCapabilitiesTagSupport is the client capability of TagSupport.
+//
+// @since 3.15.0.
+type PublishDiagnosticsClientCapabilitiesTagSupport struct {
+ // ValueSet is the tags supported by the client.
+ ValueSet []DiagnosticTag `json:"valueSet"`
+}
+
+// RenameClientCapabilities capabilities specific to the "textDocument/rename".
+type RenameClientCapabilities struct {
+ // DynamicRegistration whether rename supports dynamic registration.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // PrepareSupport is the client supports testing for validity of rename operations
+ // before execution.
+ PrepareSupport bool `json:"prepareSupport,omitempty"`
+
+ // PrepareSupportDefaultBehavior client supports the default behavior result
+ // (`{ defaultBehavior: boolean }`).
+ //
+ // The value indicates the default behavior used by the
+ // client.
+ //
+ // @since 3.16.0.
+ PrepareSupportDefaultBehavior PrepareSupportDefaultBehavior `json:"prepareSupportDefaultBehavior,omitempty"`
+
+ // HonorsChangeAnnotations whether the client honors the change annotations in
+ // text edits and resource operations returned via the
+ // rename request's workspace edit by for example presenting
+ // the workspace edit in the user interface and asking
+ // for confirmation.
+ //
+ // @since 3.16.0.
+ HonorsChangeAnnotations bool `json:"honorsChangeAnnotations,omitempty"`
+}
+
+// PrepareSupportDefaultBehavior default behavior of PrepareSupport.
+//
+// @since 3.16.0.
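+//
+// As an illustrative sketch, a client advertising prepare support with the
+// Identifier default behavior would send capabilities equivalent to:
+//
+//  RenameClientCapabilities{
+//   PrepareSupport:                true,
+//   PrepareSupportDefaultBehavior: PrepareSupportDefaultBehaviorIdentifier,
+//  }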
+type PrepareSupportDefaultBehavior float64
+
+// list of PrepareSupportDefaultBehavior.
+const (
+ // PrepareSupportDefaultBehaviorIdentifier is the client's default behavior to select the identifier
+ // according to the language's syntax rules.
+ PrepareSupportDefaultBehaviorIdentifier PrepareSupportDefaultBehavior = 1
+)
+
+// String returns a string representation of the PrepareSupportDefaultBehavior.
+func (k PrepareSupportDefaultBehavior) String() string {
+ switch k {
+ case PrepareSupportDefaultBehaviorIdentifier:
+  return "Identifier"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// FoldingRangeClientCapabilities capabilities specific to "textDocument/foldingRange" requests.
+//
+// @since 3.10.0.
+type FoldingRangeClientCapabilities struct {
+ // DynamicRegistration whether implementation supports dynamic registration for folding range providers. If this is set to `true`
+ // the client supports the new "(FoldingRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // RangeLimit is the maximum number of folding ranges that the client prefers to receive per document. The value serves as a
+ // hint, servers are free to follow the limit.
+ RangeLimit uint32 `json:"rangeLimit,omitempty"`
+
+ // LineFoldingOnly if set, the client signals that it only supports folding complete lines. If set, client will
+ // ignore specified "startCharacter" and "endCharacter" properties in a FoldingRange.
+ LineFoldingOnly bool `json:"lineFoldingOnly,omitempty"`
+}
+
+// SelectionRangeClientCapabilities capabilities specific to "textDocument/selectionRange" requests.
+//
+// @since 3.15.0.
+type SelectionRangeClientCapabilities struct {
+ // DynamicRegistration whether implementation supports dynamic registration for selection range providers. If this is set to `true`
+ // the client supports the new "(SelectionRangeProviderOptions & TextDocumentRegistrationOptions & StaticRegistrationOptions)"
+ // return value for the corresponding server capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// CallHierarchyClientCapabilities capabilities specific to "textDocument/callHierarchy" requests.
+//
+// @since 3.16.0.
+type CallHierarchyClientCapabilities struct {
+ // DynamicRegistration whether implementation supports dynamic registration. If this is set to
+ // `true` the client supports the new `(TextDocumentRegistrationOptions &
+ // StaticRegistrationOptions)` return value for the corresponding server
+ // capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// SemanticTokensClientCapabilities capabilities specific to the "textDocument.semanticTokens" request.
+//
+// @since 3.16.0.
+type SemanticTokensClientCapabilities struct {
+ // DynamicRegistration whether implementation supports dynamic registration. If this is set to
+ // `true` the client supports the new `(TextDocumentRegistrationOptions &
+ // StaticRegistrationOptions)` return value for the corresponding server
+ // capability as well.
+ DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+
+ // Requests which requests the client supports and might send to the server
+ // depending on the server's capability.
Please note that clients might not
+	// show semantic tokens or degrade some of the user experience if a range
+	// or full request is advertised by the client but not provided by the
+	// server. If, for example, the client capabilities `requests.full` and
+	// `requests.range` are both set to true but the server only provides a
+	// range provider, the client might not render a minimap correctly or might
+	// even decide to not show any semantic tokens at all.
+	Requests SemanticTokensWorkspaceClientCapabilitiesRequests `json:"requests"`
+
+	// TokenTypes is the token types that the client supports.
+	TokenTypes []string `json:"tokenTypes"`
+
+	// TokenModifiers is the token modifiers that the client supports.
+	TokenModifiers []string `json:"tokenModifiers"`
+
+	// Formats is the formats the client supports.
+	Formats []TokenFormat `json:"formats"`
+
+	// OverlappingTokenSupport whether the client supports tokens that can overlap each other.
+	OverlappingTokenSupport bool `json:"overlappingTokenSupport,omitempty"`
+
+	// MultilineTokenSupport whether the client supports tokens that can span multiple lines.
+	MultilineTokenSupport bool `json:"multilineTokenSupport,omitempty"`
+}
+
+// SemanticTokensWorkspaceClientCapabilitiesRequests capabilities specific to the "textDocument/semanticTokens/xxx" request.
+//
+// @since 3.16.0.
+type SemanticTokensWorkspaceClientCapabilitiesRequests struct {
+	// Range is whether the client will send the "textDocument/semanticTokens/range" request
+	// if the server provides a corresponding handler.
+	Range bool `json:"range,omitempty"`
+
+	// Full is whether the client will send the "textDocument/semanticTokens/full" request
+	// if the server provides a corresponding handler. The client will send the
+	// `textDocument/semanticTokens/full/delta` request if the server provides a
+	// corresponding handler.
+	Full interface{} `json:"full,omitempty"`
+}
+
+// LinkedEditingRangeClientCapabilities capabilities specific to "textDocument/linkedEditingRange" requests.
+//
+// @since 3.16.0.
+type LinkedEditingRangeClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration.
+	// If this is set to `true` the client supports the new
+	// `(TextDocumentRegistrationOptions & StaticRegistrationOptions)`
+	// return value for the corresponding server capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// MonikerClientCapabilities capabilities specific to the "textDocument/moniker" request.
+//
+// @since 3.16.0.
+type MonikerClientCapabilities struct {
+	// DynamicRegistration whether implementation supports dynamic registration. If this is set to
+	// `true` the client supports the new `(TextDocumentRegistrationOptions &
+	// StaticRegistrationOptions)` return value for the corresponding server
+	// capability as well.
+	DynamicRegistration bool `json:"dynamicRegistration,omitempty"`
+}
+
+// WindowClientCapabilities represents window specific client capabilities.
+//
+// @since 3.15.0.
+type WindowClientCapabilities struct {
+	// WorkDoneProgress whether the client supports handling progress notifications. If set, servers are allowed to
+	// report in the "workDoneProgress" property in the request-specific server capabilities.
+	//
+	// @since 3.15.0.
+	WorkDoneProgress bool `json:"workDoneProgress,omitempty"`
+
+	// ShowMessage capabilities specific to the showMessage request.
+ // + // @since 3.16.0. + ShowMessage *ShowMessageRequestClientCapabilities `json:"showMessage,omitempty"` + + // ShowDocument client capabilities for the show document request. + // + // @since 3.16.0. + ShowDocument *ShowDocumentClientCapabilities `json:"showDocument,omitempty"` +} + +// ShowMessageRequestClientCapabilities show message request client capabilities. +// +// @since 3.16.0. +type ShowMessageRequestClientCapabilities struct { + // MessageActionItem capabilities specific to the "MessageActionItem" type. + MessageActionItem *ShowMessageRequestClientCapabilitiesMessageActionItem `json:"messageActionItem,omitempty"` +} + +// ShowMessageRequestClientCapabilitiesMessageActionItem capabilities specific to the "MessageActionItem" type. +// +// @since 3.16.0. +type ShowMessageRequestClientCapabilitiesMessageActionItem struct { + // AdditionalPropertiesSupport whether the client supports additional attributes which + // are preserved and sent back to the server in the + // request's response. + AdditionalPropertiesSupport bool `json:"additionalPropertiesSupport,omitempty"` +} + +// ShowDocumentClientCapabilities client capabilities for the show document request. +// +// @since 3.16.0. +type ShowDocumentClientCapabilities struct { + // Support is the client has support for the show document + // request. + Support bool `json:"support"` +} + +// GeneralClientCapabilities represents a General specific client capabilities. +// +// @since 3.16.0. +type GeneralClientCapabilities struct { + // RegularExpressions is the client capabilities specific to regular expressions. + // + // @since 3.16.0. + RegularExpressions *RegularExpressionsClientCapabilities `json:"regularExpressions,omitempty"` + + // Markdown client capabilities specific to the client's markdown parser. + // + // @since 3.16.0. + Markdown *MarkdownClientCapabilities `json:"markdown,omitempty"` +} + +// RegularExpressionsClientCapabilities represents a client capabilities specific to regular expressions. +// +// The following features from the ECMAScript 2020 regular expression specification are NOT mandatory for a client: +// +// Assertions +// Lookahead assertion, Negative lookahead assertion, lookbehind assertion, negative lookbehind assertion. +// Character classes +// Matching control characters using caret notation (e.g. "\cX") and matching UTF-16 code units (e.g. "\uhhhh"). +// Group and ranges +// Named capturing groups. +// Unicode property escapes +// None of the features needs to be supported. +// +// The only regular expression flag that a client needs to support is "i" to specify a case insensitive search. +// +// @since 3.16.0. +type RegularExpressionsClientCapabilities struct { + // Engine is the engine's name. + // + // Well known engine name is "ECMAScript". + // https://tc39.es/ecma262/#sec-regexp-regular-expression-objects + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions + Engine string `json:"engine"` + + // Version is the engine's version. + // + // Well known engine version is "ES2020". + // https://tc39.es/ecma262/#sec-regexp-regular-expression-objects + // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions + Version string `json:"version,omitempty"` +} + +// MarkdownClientCapabilities represents a client capabilities specific to the used markdown parser. +// +// @since 3.16.0. +type MarkdownClientCapabilities struct { + // Parser is the name of the parser. + Parser string `json:"parser"` + + // version is the version of the parser. 
+	Version string `json:"version,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/capabilities_server.go b/vendor/go.lsp.dev/protocol/capabilities_server.go
new file mode 100644
index 00000000000..40ae88a6dfc
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/capabilities_server.go
@@ -0,0 +1,523 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+)
+
+// ServerCapabilities defines the capabilities provided by a language server.
+type ServerCapabilities struct {
+	// TextDocumentSync defines how text documents are synced. Is either a detailed structure defining each notification
+	// or for backwards compatibility the TextDocumentSyncKind number.
+	//
+	// If omitted it defaults to TextDocumentSyncKind.None.
+	TextDocumentSync interface{} `json:"textDocumentSync,omitempty"` // *TextDocumentSyncOptions | TextDocumentSyncKind
+
+	// CompletionProvider is the server provides completion support.
+	CompletionProvider *CompletionOptions `json:"completionProvider,omitempty"`
+
+	// HoverProvider is the server provides hover support.
+	HoverProvider interface{} `json:"hoverProvider,omitempty"` // TODO(zchee): bool | *HoverOptions
+
+	// SignatureHelpProvider is the server provides signature help support.
+	SignatureHelpProvider *SignatureHelpOptions `json:"signatureHelpProvider,omitempty"`
+
+	// DeclarationProvider is the server provides Goto Declaration support.
+	//
+	// @since 3.14.0.
+	DeclarationProvider interface{} `json:"declarationProvider,omitempty"` // TODO(zchee): bool | *DeclarationOptions | *DeclarationRegistrationOptions
+
+	// DefinitionProvider is the server provides Goto definition support.
+	DefinitionProvider interface{} `json:"definitionProvider,omitempty"` // TODO(zchee): bool | *DefinitionOptions
+
+	// TypeDefinitionProvider is the server provides Goto Type Definition support.
+	//
+	// @since 3.6.0.
+	TypeDefinitionProvider interface{} `json:"typeDefinitionProvider,omitempty"` // TODO(zchee): bool | *TypeDefinitionOptions | *TypeDefinitionRegistrationOptions
+
+	// ImplementationProvider is the server provides Goto Implementation support.
+	//
+	// @since 3.6.0.
+	ImplementationProvider interface{} `json:"implementationProvider,omitempty"` // TODO(zchee): bool | *ImplementationOptions | *ImplementationRegistrationOptions
+
+	// ReferencesProvider is the server provides find references support.
+	ReferencesProvider interface{} `json:"referencesProvider,omitempty"` // TODO(zchee): bool | *ReferenceOptions
+
+	// DocumentHighlightProvider is the server provides document highlight support.
+	DocumentHighlightProvider interface{} `json:"documentHighlightProvider,omitempty"` // TODO(zchee): bool | *DocumentHighlightOptions
+
+	// DocumentSymbolProvider is the server provides document symbol support.
+	DocumentSymbolProvider interface{} `json:"documentSymbolProvider,omitempty"` // TODO(zchee): bool | *DocumentSymbolOptions
+
+	// CodeActionProvider is the server provides code actions.
+	//
+	// CodeActionOptions may only be specified if the client states that it supports CodeActionLiteralSupport in its
+	// initial Initialize request.
+	CodeActionProvider interface{} `json:"codeActionProvider,omitempty"` // TODO(zchee): bool | *CodeActionOptions
+
+	// CodeLensProvider is the server provides code lens.
+	CodeLensProvider *CodeLensOptions `json:"codeLensProvider,omitempty"`
+
+	// DocumentLinkProvider is the server provides document link support.
+	DocumentLinkProvider *DocumentLinkOptions `json:"documentLinkProvider,omitempty"`
+
+	// ColorProvider is the server provides color provider support.
+	//
+	// @since 3.6.0.
+	ColorProvider interface{} `json:"colorProvider,omitempty"` // TODO(zchee): bool | *DocumentColorOptions | *DocumentColorRegistrationOptions
+
+	// WorkspaceSymbolProvider is the server provides workspace symbol support.
+	WorkspaceSymbolProvider interface{} `json:"workspaceSymbolProvider,omitempty"` // TODO(zchee): bool | *WorkspaceSymbolOptions
+
+	// DocumentFormattingProvider is the server provides document formatting.
+	DocumentFormattingProvider interface{} `json:"documentFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentFormattingOptions
+
+	// DocumentRangeFormattingProvider is the server provides document range formatting.
+	DocumentRangeFormattingProvider interface{} `json:"documentRangeFormattingProvider,omitempty"` // TODO(zchee): bool | *DocumentRangeFormattingOptions
+
+	// DocumentOnTypeFormattingProvider is the server provides document formatting on typing.
+	DocumentOnTypeFormattingProvider *DocumentOnTypeFormattingOptions `json:"documentOnTypeFormattingProvider,omitempty"`
+
+	// RenameProvider is the server provides rename support.
+	//
+	// RenameOptions may only be specified if the client states that it supports PrepareSupport in its
+	// initial Initialize request.
+	RenameProvider interface{} `json:"renameProvider,omitempty"` // TODO(zchee): bool | *RenameOptions
+
+	// FoldingRangeProvider is the server provides folding provider support.
+	//
+	// @since 3.10.0.
+	FoldingRangeProvider interface{} `json:"foldingRangeProvider,omitempty"` // TODO(zchee): bool | *FoldingRangeOptions | *FoldingRangeRegistrationOptions
+
+	// SelectionRangeProvider is the server provides selection range support.
+	//
+	// @since 3.15.0.
+	SelectionRangeProvider interface{} `json:"selectionRangeProvider,omitempty"` // TODO(zchee): bool | *SelectionRangeOptions | *SelectionRangeRegistrationOptions
+
+	// ExecuteCommandProvider is the server provides execute command support.
+	ExecuteCommandProvider *ExecuteCommandOptions `json:"executeCommandProvider,omitempty"`
+
+	// CallHierarchyProvider is the server provides call hierarchy support.
+	//
+	// @since 3.16.0.
+	CallHierarchyProvider interface{} `json:"callHierarchyProvider,omitempty"` // TODO(zchee): bool | *CallHierarchyOptions | *CallHierarchyRegistrationOptions
+
+	// LinkedEditingRangeProvider is the server provides linked editing range support.
+	//
+	// @since 3.16.0.
+	LinkedEditingRangeProvider interface{} `json:"linkedEditingRangeProvider,omitempty"` // TODO(zchee): bool | *LinkedEditingRangeOptions | *LinkedEditingRangeRegistrationOptions
+
+	// SemanticTokensProvider is the server provides semantic tokens support.
+	//
+	// @since 3.16.0.
+	SemanticTokensProvider interface{} `json:"semanticTokensProvider,omitempty"` // TODO(zchee): *SemanticTokensOptions | *SemanticTokensRegistrationOptions
+
+	// Workspace is the workspace specific server capabilities.
+	Workspace *ServerCapabilitiesWorkspace `json:"workspace,omitempty"`
+
+	// MonikerProvider is the server provides moniker support.
+	//
+	// @since 3.16.0.
+	MonikerProvider interface{} `json:"monikerProvider,omitempty"` // TODO(zchee): bool | *MonikerOptions | *MonikerRegistrationOptions
+
+	// Experimental server capabilities.
+	Experimental interface{} `json:"experimental,omitempty"`
+}
+
+// TextDocumentSyncOptions TextDocumentSync options.
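+//
+// A minimal sketch (assumed field values, not part of this file) of a server
+// advertising full-document sync with save notifications:
+//
+//	sync := TextDocumentSyncOptions{
+//		OpenClose: true,
+//		Change:    TextDocumentSyncKindFull,
+//		Save:      &SaveOptions{IncludeText: false},
+//	}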
+type TextDocumentSyncOptions struct {
+	// OpenClose open and close notifications are sent to the server.
+	OpenClose bool `json:"openClose,omitempty"`
+
+	// Change notifications are sent to the server. See TextDocumentSyncKind.None, TextDocumentSyncKind.Full
+	// and TextDocumentSyncKind.Incremental. If omitted it defaults to TextDocumentSyncKind.None.
+	Change TextDocumentSyncKind `json:"change,omitempty"`
+
+	// WillSave notifications are sent to the server.
+	WillSave bool `json:"willSave,omitempty"`
+
+	// WillSaveWaitUntil will save wait until requests are sent to the server.
+	WillSaveWaitUntil bool `json:"willSaveWaitUntil,omitempty"`
+
+	// Save notifications are sent to the server.
+	Save *SaveOptions `json:"save,omitempty"`
+}
+
+// SaveOptions save options.
+type SaveOptions struct {
+	// IncludeText is whether the client is supposed to include the content on save.
+	IncludeText bool `json:"includeText,omitempty"`
+}
+
+// TextDocumentSyncKind defines how the host (editor) should sync document changes to the language server.
+type TextDocumentSyncKind float64
+
+const (
+	// TextDocumentSyncKindNone documents should not be synced at all.
+	TextDocumentSyncKindNone TextDocumentSyncKind = 0
+
+	// TextDocumentSyncKindFull documents are synced by always sending the full content
+	// of the document.
+	TextDocumentSyncKindFull TextDocumentSyncKind = 1
+
+	// TextDocumentSyncKindIncremental documents are synced by sending the full content on open.
+	// After that only incremental updates to the document are
+	// sent.
+	TextDocumentSyncKindIncremental TextDocumentSyncKind = 2
+)
+
+// String implements fmt.Stringer.
+func (k TextDocumentSyncKind) String() string {
+	switch k {
+	case TextDocumentSyncKindNone:
+		return "None"
+	case TextDocumentSyncKindFull:
+		return "Full"
+	case TextDocumentSyncKindIncremental:
+		return "Incremental"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// CompletionOptions Completion options.
+type CompletionOptions struct {
+	// ResolveProvider is the server provides support to resolve additional
+	// information for a completion item.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+
+	// TriggerCharacters is the characters that trigger completion automatically.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+}
+
+// HoverOptions option of hover provider server capabilities.
+type HoverOptions struct {
+	WorkDoneProgressOptions
+}
+
+// SignatureHelpOptions SignatureHelp options.
+type SignatureHelpOptions struct {
+	// TriggerCharacters is the characters that trigger signature help
+	// automatically.
+	TriggerCharacters []string `json:"triggerCharacters,omitempty"`
+
+	// RetriggerCharacters is the list of characters that re-trigger signature help.
+	//
+	// These trigger characters are only active when signature help is already
+	// showing.
+	// All trigger characters are also counted as re-trigger characters.
+	//
+	// @since 3.15.0.
+	RetriggerCharacters []string `json:"retriggerCharacters,omitempty"`
+}
+
+// DeclarationOptions registration option of Declaration server capability.
+//
+// @since 3.15.0.
+type DeclarationOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DeclarationRegistrationOptions registration option of Declaration server capability.
+//
+// @since 3.15.0.
+type DeclarationRegistrationOptions struct {
+	DeclarationOptions
+	TextDocumentRegistrationOptions
+	StaticRegistrationOptions
+}
+
+// DefinitionOptions registration option of Definition server capability.
+//
+// @since 3.15.0.
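+//
+// Hedged note: like most *Options types in this file it only embeds
+// WorkDoneProgressOptions, so a server typically enables the feature with a
+// bare literal, e.g. (sketch) ServerCapabilities{DefinitionProvider: &DefinitionOptions{}}.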
+type DefinitionOptions struct {
+	WorkDoneProgressOptions
+}
+
+// TypeDefinitionOptions registration option of TypeDefinition server capability.
+//
+// @since 3.15.0.
+type TypeDefinitionOptions struct {
+	WorkDoneProgressOptions
+}
+
+// TypeDefinitionRegistrationOptions registration option of TypeDefinition server capability.
+//
+// @since 3.15.0.
+type TypeDefinitionRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	TypeDefinitionOptions
+	StaticRegistrationOptions
+}
+
+// ImplementationOptions registration option of Implementation server capability.
+//
+// @since 3.15.0.
+type ImplementationOptions struct {
+	WorkDoneProgressOptions
+}
+
+// ImplementationRegistrationOptions registration option of Implementation server capability.
+//
+// @since 3.15.0.
+type ImplementationRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	ImplementationOptions
+	StaticRegistrationOptions
+}
+
+// ReferenceOptions registration option of Reference server capability.
+type ReferenceOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentHighlightOptions registration option of DocumentHighlight server capability.
+//
+// @since 3.15.0.
+type DocumentHighlightOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentSymbolOptions registration option of DocumentSymbol server capability.
+//
+// @since 3.15.0.
+type DocumentSymbolOptions struct {
+	WorkDoneProgressOptions
+
+	// Label a human-readable string that is shown when multiple outline trees
+	// are shown for the same document.
+	//
+	// @since 3.16.0.
+	Label string `json:"label,omitempty"`
+}
+
+// CodeActionOptions CodeAction options.
+type CodeActionOptions struct {
+	// CodeActionKinds that this server may return.
+	//
+	// The list of kinds may be generic, such as "CodeActionKind.Refactor", or the server
+	// may list out every specific kind they provide.
+	CodeActionKinds []CodeActionKind `json:"codeActionKinds,omitempty"`
+
+	// ResolveProvider is the server provides support to resolve additional
+	// information for a code action.
+	//
+	// @since 3.16.0.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// CodeLensOptions CodeLens options.
+type CodeLensOptions struct {
+	// ResolveProvider code lens has a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentLinkOptions document link options.
+type DocumentLinkOptions struct {
+	// ResolveProvider document links have a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentColorOptions registration option of DocumentColor server capability.
+//
+// @since 3.15.0.
+type DocumentColorOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentColorRegistrationOptions registration option of DocumentColor server capability.
+//
+// @since 3.15.0.
+type DocumentColorRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	StaticRegistrationOptions
+	DocumentColorOptions
+}
+
+// WorkspaceSymbolOptions registration option of WorkspaceSymbol server capability.
+//
+// @since 3.15.0.
+type WorkspaceSymbolOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentFormattingOptions registration option of DocumentFormatting server capability.
+//
+// @since 3.15.0.
+type DocumentFormattingOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentRangeFormattingOptions registration option of DocumentRangeFormatting server capability.
+//
+// @since 3.15.0.
+type DocumentRangeFormattingOptions struct {
+	WorkDoneProgressOptions
+}
+
+// DocumentOnTypeFormattingOptions format document on type options.
+type DocumentOnTypeFormattingOptions struct {
+	// FirstTriggerCharacter a character on which formatting should be triggered, like "}".
+	FirstTriggerCharacter string `json:"firstTriggerCharacter"`
+
+	// MoreTriggerCharacter more trigger characters.
+	MoreTriggerCharacter []string `json:"moreTriggerCharacter,omitempty"`
+}
+
+// RenameOptions rename options.
+type RenameOptions struct {
+	// PrepareProvider renames should be checked and tested before being executed.
+	PrepareProvider bool `json:"prepareProvider,omitempty"`
+}
+
+// FoldingRangeOptions registration option of FoldingRange server capability.
+//
+// @since 3.15.0.
+type FoldingRangeOptions struct {
+	WorkDoneProgressOptions
+}
+
+// FoldingRangeRegistrationOptions registration option of FoldingRange server capability.
+//
+// @since 3.15.0.
+type FoldingRangeRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	FoldingRangeOptions
+	StaticRegistrationOptions
+}
+
+// ExecuteCommandOptions execute command options.
+type ExecuteCommandOptions struct {
+	// Commands is the commands to be executed on the server.
+	Commands []string `json:"commands"`
+}
+
+// CallHierarchyOptions option of CallHierarchy.
+//
+// @since 3.16.0.
+type CallHierarchyOptions struct {
+	WorkDoneProgressOptions
+}
+
+// CallHierarchyRegistrationOptions registration options of CallHierarchy.
+//
+// @since 3.16.0.
+type CallHierarchyRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	CallHierarchyOptions
+	StaticRegistrationOptions
+}
+
+// LinkedEditingRangeOptions option of linked editing range provider server capabilities.
+//
+// @since 3.16.0.
+type LinkedEditingRangeOptions struct {
+	WorkDoneProgressOptions
+}
+
+// LinkedEditingRangeRegistrationOptions registration option of linked editing range provider server capabilities.
+//
+// @since 3.16.0.
+type LinkedEditingRangeRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	LinkedEditingRangeOptions
+	StaticRegistrationOptions
+}
+
+// SemanticTokensOptions option of semantic tokens provider server capabilities.
+//
+// @since 3.16.0.
+type SemanticTokensOptions struct {
+	WorkDoneProgressOptions
+}
+
+// SemanticTokensRegistrationOptions registration option of semantic tokens provider server capabilities.
+//
+// @since 3.16.0.
+type SemanticTokensRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	SemanticTokensOptions
+	StaticRegistrationOptions
+}
+
+// ServerCapabilitiesWorkspace specific server capabilities.
+type ServerCapabilitiesWorkspace struct {
+	// WorkspaceFolders is the server supports workspace folders.
+	//
+	// @since 3.6.0.
+	WorkspaceFolders *ServerCapabilitiesWorkspaceFolders `json:"workspaceFolders,omitempty"`
+
+	// FileOperations is the server is interested in file notifications/requests.
+	//
+	// @since 3.16.0.
+	FileOperations *ServerCapabilitiesWorkspaceFileOperations `json:"fileOperations,omitempty"`
+}
+
+// ServerCapabilitiesWorkspaceFolders is the server supports workspace folders.
+//
+// @since 3.6.0.
+type ServerCapabilitiesWorkspaceFolders struct {
+	// Supported is the server has support for workspace folders.
+	Supported bool `json:"supported,omitempty"`
+
+	// ChangeNotifications whether the server wants to receive workspace folder
+	// change notifications.
+	//
+	// If a string is provided, the string is treated as an ID
+	// under which the notification is registered on the client
+	// side. The ID can be used to unregister for these events
+	// using the `client/unregisterCapability` request.
+	ChangeNotifications interface{} `json:"changeNotifications,omitempty"` // string | boolean
+}
+
+// ServerCapabilitiesWorkspaceFileOperations is the server is interested in file notifications/requests.
+//
+// @since 3.16.0.
+type ServerCapabilitiesWorkspaceFileOperations struct {
+	// DidCreate is the server is interested in receiving didCreateFiles
+	// notifications.
+	DidCreate *FileOperationRegistrationOptions `json:"didCreate,omitempty"`
+
+	// WillCreate is the server is interested in receiving willCreateFiles requests.
+	WillCreate *FileOperationRegistrationOptions `json:"willCreate,omitempty"`
+
+	// DidRename is the server is interested in receiving didRenameFiles
+	// notifications.
+	DidRename *FileOperationRegistrationOptions `json:"didRename,omitempty"`
+
+	// WillRename is the server is interested in receiving willRenameFiles requests.
+	WillRename *FileOperationRegistrationOptions `json:"willRename,omitempty"`
+
+	// DidDelete is the server is interested in receiving didDeleteFiles file
+	// notifications.
+	DidDelete *FileOperationRegistrationOptions `json:"didDelete,omitempty"`
+
+	// WillDelete is the server is interested in receiving willDeleteFiles file
+	// requests.
+	WillDelete *FileOperationRegistrationOptions `json:"willDelete,omitempty"`
+}
+
+// FileOperationRegistrationOptions is the options to register for file operations.
+//
+// @since 3.16.0.
+type FileOperationRegistrationOptions struct {
+	// Filters is the actual filters.
+	Filters []FileOperationFilter `json:"filters"`
+}
+
+// MonikerOptions option of moniker provider server capabilities.
+//
+// @since 3.16.0.
+type MonikerOptions struct {
+	WorkDoneProgressOptions
+}
+
+// MonikerRegistrationOptions registration option of moniker provider server capabilities.
+//
+// @since 3.16.0.
+type MonikerRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+	MonikerOptions
+}
diff --git a/vendor/go.lsp.dev/protocol/client.go b/vendor/go.lsp.dev/protocol/client.go
new file mode 100644
index 00000000000..02109209441
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/client.go
@@ -0,0 +1,412 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+	"go.uber.org/zap"
+
+	"go.lsp.dev/jsonrpc2"
+	"go.lsp.dev/pkg/xcontext"
+)
+
+// ClientDispatcher returns a Client that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ClientDispatcher(conn jsonrpc2.Conn, logger *zap.Logger) Client {
+	return &client{
+		Conn:   conn,
+		logger: logger,
+	}
+}
+
+// ClientHandler handler of LSP client.
+func ClientHandler(client Client, handler jsonrpc2.Handler) jsonrpc2.Handler {
+	h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+		if ctx.Err() != nil {
+			xctx := xcontext.Detach(ctx)
+
+			return reply(xctx, nil, ErrRequestCancelled)
+		}
+
+		handled, err := clientDispatch(ctx, client, reply, req)
+		if handled || err != nil {
+			return err
+		}
+
+		return handler(ctx, reply, req)
+	}
+
+	return h
+}
+
+// clientDispatch dispatches an incoming request or notification to the
+// matching Client method, reporting whether it handled it.
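+//
+// Every case below follows the same shape, sketched here for a hypothetical
+// notification (SomeParams and Some are illustrative names, not real API):
+//
+//	var params SomeParams
+//	if err := dec.Decode(&params); err != nil {
+//		return true, replyParseError(ctx, reply, err)
+//	}
+//	return true, reply(ctx, nil, client.Some(ctx, &params))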
+//nolint:funlen,cyclop
+func clientDispatch(ctx context.Context, client Client, reply jsonrpc2.Replier, req jsonrpc2.Request) (handled bool, err error) {
+	if ctx.Err() != nil {
+		return true, reply(ctx, nil, ErrRequestCancelled)
+	}
+
+	dec := json.NewDecoder(bytes.NewReader(req.Params()))
+	logger := LoggerFromContext(ctx)
+
+	switch req.Method() {
+	case MethodProgress: // notification
+		defer logger.Debug(MethodProgress, zap.Error(err))
+
+		var params ProgressParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.Progress(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkDoneProgressCreate: // request
+		defer logger.Debug(MethodWorkDoneProgressCreate, zap.Error(err))
+
+		var params WorkDoneProgressCreateParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.WorkDoneProgressCreate(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowLogMessage: // notification
+		defer logger.Debug(MethodWindowLogMessage, zap.Error(err))
+
+		var params LogMessageParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.LogMessage(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentPublishDiagnostics: // notification
+		defer logger.Debug(MethodTextDocumentPublishDiagnostics, zap.Error(err))
+
+		var params PublishDiagnosticsParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.PublishDiagnostics(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowShowMessage: // notification
+		defer logger.Debug(MethodWindowShowMessage, zap.Error(err))
+
+		var params ShowMessageParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.ShowMessage(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWindowShowMessageRequest: // request
+		defer logger.Debug(MethodWindowShowMessageRequest, zap.Error(err))
+
+		var params ShowMessageRequestParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.ShowMessageRequest(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTelemetryEvent: // notification
+		defer logger.Debug(MethodTelemetryEvent, zap.Error(err))
+
+		var params interface{}
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.Telemetry(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodClientRegisterCapability: // request
+		defer logger.Debug(MethodClientRegisterCapability, zap.Error(err))
+
+		var params RegistrationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.RegisterCapability(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodClientUnregisterCapability: // request
+		defer logger.Debug(MethodClientUnregisterCapability, zap.Error(err))
+
+		var params UnregistrationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := client.UnregisterCapability(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkspaceApplyEdit: // request
+		defer logger.Debug(MethodWorkspaceApplyEdit, zap.Error(err))
+
+		var params ApplyWorkspaceEditParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.ApplyEdit(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceConfiguration: // request
+		defer logger.Debug(MethodWorkspaceConfiguration, zap.Error(err))
+
+		var params ConfigurationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := client.Configuration(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceWorkspaceFolders: // request
+		defer logger.Debug(MethodWorkspaceWorkspaceFolders, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		resp, err := client.WorkspaceFolders(ctx)
+
+		return true, reply(ctx, resp, err)
+
+	default:
+		return false, nil
+	}
+}
+
+// Client represents a Language Server Protocol client.
+type Client interface {
+	Progress(ctx context.Context, params *ProgressParams) (err error)
+	WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error)
+	LogMessage(ctx context.Context, params *LogMessageParams) (err error)
+	PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error)
+	ShowMessage(ctx context.Context, params *ShowMessageParams) (err error)
+	ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (result *MessageActionItem, err error)
+	Telemetry(ctx context.Context, params interface{}) (err error)
+	RegisterCapability(ctx context.Context, params *RegistrationParams) (err error)
+	UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error)
+	ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error)
+	Configuration(ctx context.Context, params *ConfigurationParams) (result []interface{}, err error)
+	WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error)
+}
+
+// list of client methods.
+const (
+	// MethodProgress method name of "$/progress".
+	MethodProgress = "$/progress"
+
+	// MethodWorkDoneProgressCreate method name of "window/workDoneProgress/create".
+	MethodWorkDoneProgressCreate = "window/workDoneProgress/create"
+
+	// MethodWindowShowMessage method name of "window/showMessage".
+	MethodWindowShowMessage = "window/showMessage"
+
+	// MethodWindowShowMessageRequest method name of "window/showMessageRequest".
+	MethodWindowShowMessageRequest = "window/showMessageRequest"
+
+	// MethodWindowLogMessage method name of "window/logMessage".
+	MethodWindowLogMessage = "window/logMessage"
+
+	// MethodTelemetryEvent method name of "telemetry/event".
+	MethodTelemetryEvent = "telemetry/event"
+
+	// MethodClientRegisterCapability method name of "client/registerCapability".
+	MethodClientRegisterCapability = "client/registerCapability"
+
+	// MethodClientUnregisterCapability method name of "client/unregisterCapability".
+	MethodClientUnregisterCapability = "client/unregisterCapability"
+
+	// MethodTextDocumentPublishDiagnostics method name of "textDocument/publishDiagnostics".
+	MethodTextDocumentPublishDiagnostics = "textDocument/publishDiagnostics"
+
+	// MethodWorkspaceApplyEdit method name of "workspace/applyEdit".
+	MethodWorkspaceApplyEdit = "workspace/applyEdit"
+
+	// MethodWorkspaceConfiguration method name of "workspace/configuration".
+	MethodWorkspaceConfiguration = "workspace/configuration"
+
+	// MethodWorkspaceWorkspaceFolders method name of "workspace/workspaceFolders".
+	MethodWorkspaceWorkspaceFolders = "workspace/workspaceFolders"
+)
+
+// client implements a Language Server Protocol client.
+type client struct {
+	jsonrpc2.Conn
+
+	logger *zap.Logger
+}
+
+// compile-time check that client implements the Client interface.
+var _ Client = (*client)(nil)
+
+// Progress sends a progress notification; the base protocol offers support to report progress in a generic fashion.
+//
+// This mechanism can be used to report any kind of progress including work done progress (usually used to report progress in the user interface using a progress bar) and
+// partial result progress to support streaming of results.
+//
+// @since 3.16.0.
+func (c *client) Progress(ctx context.Context, params *ProgressParams) (err error) {
+	c.logger.Debug("call " + MethodProgress)
+	defer c.logger.Debug("end "+MethodProgress, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodProgress, params)
+}
+
+// WorkDoneProgressCreate sends a request from the server to the client to ask the client to create a work done progress.
+//
+// @since 3.16.0.
+func (c *client) WorkDoneProgressCreate(ctx context.Context, params *WorkDoneProgressCreateParams) (err error) {
+	c.logger.Debug("call " + MethodWorkDoneProgressCreate)
+	defer c.logger.Debug("end "+MethodWorkDoneProgressCreate, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodWorkDoneProgressCreate, params, nil)
+}
+
+// LogMessage sends the notification from the server to the client to ask the client to log a particular message.
+func (c *client) LogMessage(ctx context.Context, params *LogMessageParams) (err error) {
+	c.logger.Debug("call " + MethodWindowLogMessage)
+	defer c.logger.Debug("end "+MethodWindowLogMessage, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodWindowLogMessage, params)
+}
+
+// PublishDiagnostics sends the notification from the server to the client to signal results of validation runs.
+//
+// Diagnostics are “owned” by the server so it is the server’s responsibility to clear them if necessary. The following rule is used for VS Code servers that generate diagnostics:
+//
+// - if a language is single file only (for example HTML) then diagnostics are cleared by the server when the file is closed.
+// - if a language has a project system (for example C#) diagnostics are not cleared when a file closes. When a project is opened all diagnostics for all files are recomputed (or read from a cache).
+//
+// When a file changes it is the server’s responsibility to re-compute diagnostics and push them to the client.
+// If the computed set is empty it has to push the empty array to clear former diagnostics.
+// Newly pushed diagnostics always replace previously pushed diagnostics. There is no merging that happens on the client side.
+func (c *client) PublishDiagnostics(ctx context.Context, params *PublishDiagnosticsParams) (err error) {
+	c.logger.Debug("call " + MethodTextDocumentPublishDiagnostics)
+	defer c.logger.Debug("end "+MethodTextDocumentPublishDiagnostics, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodTextDocumentPublishDiagnostics, params)
+}
+
+// ShowMessage sends the notification from a server to a client to ask the
+// client to display a particular message in the user interface.
+func (c *client) ShowMessage(ctx context.Context, params *ShowMessageParams) (err error) {
+	return c.Conn.Notify(ctx, MethodWindowShowMessage, params)
+}
+
+// ShowMessageRequest sends the request from a server to a client to ask the client to display a particular message in the user interface.
+//
+// In addition to the show message notification the request allows passing actions and waiting for an answer from the client.
+func (c *client) ShowMessageRequest(ctx context.Context, params *ShowMessageRequestParams) (_ *MessageActionItem, err error) {
+	c.logger.Debug("call " + MethodWindowShowMessageRequest)
+	defer c.logger.Debug("end "+MethodWindowShowMessageRequest, zap.Error(err))
+
+	var result *MessageActionItem
+	if err := Call(ctx, c.Conn, MethodWindowShowMessageRequest, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Telemetry sends the notification from the server to the client to ask the client to log a telemetry event.
+func (c *client) Telemetry(ctx context.Context, params interface{}) (err error) {
+	c.logger.Debug("call " + MethodTelemetryEvent)
+	defer c.logger.Debug("end "+MethodTelemetryEvent, zap.Error(err))
+
+	return c.Conn.Notify(ctx, MethodTelemetryEvent, params)
+}
+
+// RegisterCapability sends the request from the server to the client to register for a new capability on the client side.
+//
+// Not all clients need to support dynamic capability registration.
+//
+// A client opts in via the dynamicRegistration property on the specific client capabilities.
+// A client can even provide dynamic registration for capability A but not for capability B (see TextDocumentClientCapabilities as an example).
+func (c *client) RegisterCapability(ctx context.Context, params *RegistrationParams) (err error) {
+	c.logger.Debug("call " + MethodClientRegisterCapability)
+	defer c.logger.Debug("end "+MethodClientRegisterCapability, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodClientRegisterCapability, params, nil)
+}
+
+// UnregisterCapability sends the request from the server to the client to unregister a previously registered capability.
+func (c *client) UnregisterCapability(ctx context.Context, params *UnregistrationParams) (err error) {
+	c.logger.Debug("call " + MethodClientUnregisterCapability)
+	defer c.logger.Debug("end "+MethodClientUnregisterCapability, zap.Error(err))
+
+	return Call(ctx, c.Conn, MethodClientUnregisterCapability, params, nil)
+}
+
+// ApplyEdit sends the request from the server to the client to modify a resource on the client side.
+func (c *client) ApplyEdit(ctx context.Context, params *ApplyWorkspaceEditParams) (result bool, err error) {
+	c.logger.Debug("call " + MethodWorkspaceApplyEdit)
+	defer c.logger.Debug("end "+MethodWorkspaceApplyEdit, zap.Error(err))
+
+	if err := Call(ctx, c.Conn, MethodWorkspaceApplyEdit, params, &result); err != nil {
+		return false, err
+	}
+
+	return result, nil
+}
+
+// Configuration sends the request from the server to the client to fetch configuration settings from the client.
+//
+// The request can fetch several configuration settings in one roundtrip.
+// The order of the returned configuration settings corresponds to the order of the
+// passed ConfigurationItems (e.g. the first item in the response is the result for the first configuration item in the params).
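+//
+// A hedged usage sketch, assuming c is a Client and the ConfigurationItem
+// type declared elsewhere in this package (the "gopls" section name is illustrative):
+//
+//	items := []ConfigurationItem{{Section: "gopls"}}
+//	settings, err := c.Configuration(ctx, &ConfigurationParams{Items: items})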
+func (c *client) Configuration(ctx context.Context, params *ConfigurationParams) (_ []interface{}, err error) { + c.logger.Debug("call " + MethodWorkspaceConfiguration) + defer c.logger.Debug("end "+MethodWorkspaceConfiguration, zap.Error(err)) + + var result []interface{} + if err := Call(ctx, c.Conn, MethodWorkspaceConfiguration, params, &result); err != nil { + return nil, err + } + + return result, nil +} + +// WorkspaceFolders sends the request from the server to the client to fetch the current open list of workspace folders. +// +// Returns null in the response if only a single file is open in the tool. Returns an empty array if a workspace is open but no folders are configured. +// +// @since 3.6.0. +func (c *client) WorkspaceFolders(ctx context.Context) (result []WorkspaceFolder, err error) { + c.logger.Debug("call " + MethodWorkspaceWorkspaceFolders) + defer c.logger.Debug("end "+MethodWorkspaceWorkspaceFolders, zap.Error(err)) + + if err := Call(ctx, c.Conn, MethodWorkspaceWorkspaceFolders, nil, &result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/go.lsp.dev/protocol/context.go b/vendor/go.lsp.dev/protocol/context.go new file mode 100644 index 00000000000..d12bcd800d3 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/context.go @@ -0,0 +1,35 @@ +// SPDX-FileCopyrightText: 2020 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "context" + + "go.uber.org/zap" +) + +var ( + ctxLogger struct{} + ctxClient struct{} +) + +// WithLogger returns the context with zap.Logger value. +func WithLogger(ctx context.Context, logger *zap.Logger) context.Context { + return context.WithValue(ctx, ctxLogger, logger) +} + +// LoggerFromContext extracts zap.Logger from context. +func LoggerFromContext(ctx context.Context) *zap.Logger { + logger, ok := ctx.Value(ctxLogger).(*zap.Logger) + if !ok { + return zap.NewNop() + } + + return logger +} + +// WithClient returns the context with Client value. +func WithClient(ctx context.Context, client Client) context.Context { + return context.WithValue(ctx, ctxClient, client) +} diff --git a/vendor/go.lsp.dev/protocol/deprecated.go b/vendor/go.lsp.dev/protocol/deprecated.go new file mode 100644 index 00000000000..fa4b2160939 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/deprecated.go @@ -0,0 +1,264 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +// ClientCapabilitiesShowDocument alias of ShowDocumentClientCapabilities. +// +// Deprecated: Use ShowDocumentClientCapabilities instead. +type ClientCapabilitiesShowDocument = ShowDocumentClientCapabilities + +// ClientCapabilitiesShowMessageRequest alias of ShowMessageRequestClientCapabilities. +// +// Deprecated: Use ShowMessageRequestClientCapabilities instead. +type ClientCapabilitiesShowMessageRequest = ShowMessageRequestClientCapabilities + +// ClientCapabilitiesShowMessageRequestMessageActionItem alias of ShowMessageRequestClientCapabilitiesMessageActionItem. +// +// Deprecated: Use ShowMessageRequestClientCapabilitiesMessageActionItem instead. +type ClientCapabilitiesShowMessageRequestMessageActionItem = ShowMessageRequestClientCapabilitiesMessageActionItem + +// ReferencesParams alias of ReferenceParams. +// +// Deprecated: Use ReferenceParams instead. +type ReferencesParams = ReferenceParams + +// TextDocumentClientCapabilitiesCallHierarchy alias of CallHierarchyClientCapabilities. 
+// +// Deprecated: Use CallHierarchyClientCapabilities instead. +type TextDocumentClientCapabilitiesCallHierarchy = CallHierarchyClientCapabilities + +// TextDocumentClientCapabilitiesCodeAction alias of CodeActionClientCapabilities. +// +// Deprecated: Use CodeActionClientCapabilities instead. +type TextDocumentClientCapabilitiesCodeAction = CodeActionClientCapabilities + +// TextDocumentClientCapabilitiesCodeActionKind alias of CodeActionClientCapabilitiesKind. +// +// Deprecated: Use CodeActionClientCapabilitiesKind instead. +type TextDocumentClientCapabilitiesCodeActionKind = CodeActionClientCapabilitiesKind + +// TextDocumentClientCapabilitiesCodeActionLiteralSupport alias of CodeActionClientCapabilitiesLiteralSupport. +// +// Deprecated: Use CodeActionClientCapabilitiesLiteralSupport instead. +type TextDocumentClientCapabilitiesCodeActionLiteralSupport = CodeActionClientCapabilitiesLiteralSupport + +// TextDocumentClientCapabilitiesCodeActionResolveSupport alias of CodeActionClientCapabilitiesResolveSupport. +// +// Deprecated: Use CodeActionClientCapabilitiesResolveSupport instead. +type TextDocumentClientCapabilitiesCodeActionResolveSupport = CodeActionClientCapabilitiesResolveSupport + +// TextDocumentClientCapabilitiesCodeLens alias of CodeLensClientCapabilities. +// +// Deprecated: Use CodeLensClientCapabilities instead. +type TextDocumentClientCapabilitiesCodeLens = CodeLensClientCapabilities + +// TextDocumentClientCapabilitiesColorProvider alias of DocumentColorClientCapabilities. +// +// Deprecated: Use DocumentColorClientCapabilities instead. +type TextDocumentClientCapabilitiesColorProvider = DocumentColorClientCapabilities + +// TextDocumentClientCapabilitiesCompletion alias of CompletionTextDocumentClientCapabilities. +// +// Deprecated: Use CompletionTextDocumentClientCapabilities instead. +type TextDocumentClientCapabilitiesCompletion = CompletionTextDocumentClientCapabilities + +// TextDocumentClientCapabilitiesCompletionItem alias of CompletionTextDocumentClientCapabilitiesItem. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItem instead. +type TextDocumentClientCapabilitiesCompletionItem = CompletionTextDocumentClientCapabilitiesItem + +// TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport alias of CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport instead. +type TextDocumentClientCapabilitiesCompletionItemInsertTextModeSupport = CompletionTextDocumentClientCapabilitiesItemInsertTextModeSupport + +// TextDocumentClientCapabilitiesCompletionItemKind alias of CompletionTextDocumentClientCapabilitiesItemKind. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemKind instead. +type TextDocumentClientCapabilitiesCompletionItemKind = CompletionTextDocumentClientCapabilitiesItemKind + +// TextDocumentClientCapabilitiesCompletionItemResolveSupport alias of CompletionTextDocumentClientCapabilitiesItemResolveSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemResolveSupport instead. +type TextDocumentClientCapabilitiesCompletionItemResolveSupport = CompletionTextDocumentClientCapabilitiesItemResolveSupport + +// TextDocumentClientCapabilitiesCompletionItemTagSupport alias of CompletionTextDocumentClientCapabilitiesItemTagSupport. +// +// Deprecated: Use CompletionTextDocumentClientCapabilitiesItemTagSupport instead. 
+type TextDocumentClientCapabilitiesCompletionItemTagSupport = CompletionTextDocumentClientCapabilitiesItemTagSupport
+
+// TextDocumentClientCapabilitiesDeclaration alias of DeclarationTextDocumentClientCapabilities.
+//
+// Deprecated: Use DeclarationTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesDeclaration = DeclarationTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesDefinition alias of DefinitionTextDocumentClientCapabilities.
+//
+// Deprecated: Use DefinitionTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesDefinition = DefinitionTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentHighlight alias of DocumentHighlightClientCapabilities.
+//
+// Deprecated: Use DocumentHighlightClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentHighlight = DocumentHighlightClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentLink alias of DocumentLinkClientCapabilities.
+//
+// Deprecated: Use DocumentLinkClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentLink = DocumentLinkClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentSymbol alias of DocumentSymbolClientCapabilities.
+//
+// Deprecated: Use DocumentSymbolClientCapabilities instead.
+type TextDocumentClientCapabilitiesDocumentSymbol = DocumentSymbolClientCapabilities
+
+// TextDocumentClientCapabilitiesDocumentSymbolTagSupport alias of DocumentSymbolClientCapabilitiesTagSupport.
+//
+// Deprecated: Use DocumentSymbolClientCapabilitiesTagSupport instead.
+type TextDocumentClientCapabilitiesDocumentSymbolTagSupport = DocumentSymbolClientCapabilitiesTagSupport
+
+// TextDocumentClientCapabilitiesFoldingRange alias of FoldingRangeClientCapabilities.
+//
+// Deprecated: Use FoldingRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesFoldingRange = FoldingRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesFormatting alias of DocumentFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesFormatting = DocumentFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesHover alias of HoverTextDocumentClientCapabilities.
+//
+// Deprecated: Use HoverTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesHover = HoverTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesImplementation alias of ImplementationTextDocumentClientCapabilities.
+//
+// Deprecated: Use ImplementationTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesImplementation = ImplementationTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesLinkedEditingRange alias of LinkedEditingRangeClientCapabilities.
+//
+// Deprecated: Use LinkedEditingRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesLinkedEditingRange = LinkedEditingRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesMoniker alias of MonikerClientCapabilities.
+//
+// Deprecated: Use MonikerClientCapabilities instead.
+type TextDocumentClientCapabilitiesMoniker = MonikerClientCapabilities
+
+// TextDocumentClientCapabilitiesOnTypeFormatting alias of DocumentOnTypeFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentOnTypeFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesOnTypeFormatting = DocumentOnTypeFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesPublishDiagnostics alias of PublishDiagnosticsClientCapabilities.
+//
+// Deprecated: Use PublishDiagnosticsClientCapabilities instead.
+type TextDocumentClientCapabilitiesPublishDiagnostics = PublishDiagnosticsClientCapabilities
+
+// TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport alias of PublishDiagnosticsClientCapabilitiesTagSupport.
+//
+// Deprecated: Use PublishDiagnosticsClientCapabilitiesTagSupport instead.
+type TextDocumentClientCapabilitiesPublishDiagnosticsTagSupport = PublishDiagnosticsClientCapabilitiesTagSupport
+
+// TextDocumentClientCapabilitiesRangeFormatting alias of DocumentRangeFormattingClientCapabilities.
+//
+// Deprecated: Use DocumentRangeFormattingClientCapabilities instead.
+type TextDocumentClientCapabilitiesRangeFormatting = DocumentRangeFormattingClientCapabilities
+
+// TextDocumentClientCapabilitiesReferences alias of ReferencesTextDocumentClientCapabilities.
+//
+// Deprecated: Use ReferencesTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesReferences = ReferencesTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesRename alias of RenameClientCapabilities.
+//
+// Deprecated: Use RenameClientCapabilities instead.
+type TextDocumentClientCapabilitiesRename = RenameClientCapabilities
+
+// TextDocumentClientCapabilitiesSelectionRange alias of SelectionRangeClientCapabilities.
+//
+// Deprecated: Use SelectionRangeClientCapabilities instead.
+type TextDocumentClientCapabilitiesSelectionRange = SelectionRangeClientCapabilities
+
+// TextDocumentClientCapabilitiesSemanticTokens alias of SemanticTokensClientCapabilities.
+//
+// Deprecated: Use SemanticTokensClientCapabilities instead.
+type TextDocumentClientCapabilitiesSemanticTokens = SemanticTokensClientCapabilities
+
+// TextDocumentClientCapabilitiesSignatureHelp alias of SignatureHelpTextDocumentClientCapabilities.
+//
+// Deprecated: Use SignatureHelpTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesSignatureHelp = SignatureHelpTextDocumentClientCapabilities
+
+// TextDocumentClientCapabilitiesSynchronization alias of TextDocumentSyncClientCapabilities.
+//
+// Deprecated: Use TextDocumentSyncClientCapabilities instead.
+type TextDocumentClientCapabilitiesSynchronization = TextDocumentSyncClientCapabilities
+
+// TextDocumentClientCapabilitiesTypeDefinition alias of TypeDefinitionTextDocumentClientCapabilities.
+//
+// Deprecated: Use TypeDefinitionTextDocumentClientCapabilities instead.
+type TextDocumentClientCapabilitiesTypeDefinition = TypeDefinitionTextDocumentClientCapabilities
+
+// Abort alias of FailureHandlingKindAbort.
+//
+// Deprecated: Use FailureHandlingKindAbort instead.
+const Abort = FailureHandlingKindAbort
+
+// TextOnlyTransactional alias of FailureHandlingKindTextOnlyTransactional.
+//
+// Deprecated: Use FailureHandlingKindTextOnlyTransactional instead.
+const TextOnlyTransactional = FailureHandlingKindTextOnlyTransactional
+
+// Transactional alias of FailureHandlingKindTransactional.
+//
+// Deprecated: Use FailureHandlingKindTransactional instead.
+const Transactional = FailureHandlingKindTransactional
+
+// Undo alias of FailureHandlingKindUndo.
+//
+// Deprecated: Use FailureHandlingKindUndo instead.
+const Undo = FailureHandlingKindUndo
+
+// WorkspaceClientCapabilitiesSymbol alias of WorkspaceSymbolClientCapabilities.
+//
+// Deprecated: Use WorkspaceSymbolClientCapabilities instead.
+type WorkspaceClientCapabilitiesSymbol = WorkspaceSymbolClientCapabilities
+
+// WorkspaceClientCapabilitiesSymbolKind alias of SymbolKindCapabilities.
+//
+// Deprecated: Use SymbolKindCapabilities instead.
+type WorkspaceClientCapabilitiesSymbolKind = SymbolKindCapabilities + +// WorkspaceClientCapabilitiesCodeLens alias of CodeLensWorkspaceClientCapabilities. +// +// Deprecated: Use CodeLensWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesCodeLens = CodeLensWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesDidChangeConfiguration alias of DidChangeConfigurationWorkspaceClientCapabilities. +// +// Deprecated: Use DidChangeConfigurationWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesDidChangeConfiguration = DidChangeConfigurationWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesDidChangeWatchedFiles alias of DidChangeWatchedFilesWorkspaceClientCapabilities. +// +// Deprecated: Use DidChangeWatchedFilesWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesDidChangeWatchedFiles = DidChangeWatchedFilesWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesExecuteCommand alias of ExecuteCommandClientCapabilities. +// +// Deprecated: Use ExecuteCommandClientCapabilities instead. +type WorkspaceClientCapabilitiesExecuteCommand = ExecuteCommandClientCapabilities + +// WorkspaceClientCapabilitiesSemanticTokens alias of SemanticTokensWorkspaceClientCapabilities. +// +// Deprecated: Use SemanticTokensWorkspaceClientCapabilities instead. +type WorkspaceClientCapabilitiesSemanticTokens = SemanticTokensWorkspaceClientCapabilities + +// WorkspaceClientCapabilitiesSemanticTokensRequests alias of SemanticTokensWorkspaceClientCapabilitiesRequests. +// +// Deprecated: Use SemanticTokensWorkspaceClientCapabilitiesRequests instead. +type WorkspaceClientCapabilitiesSemanticTokensRequests = SemanticTokensWorkspaceClientCapabilitiesRequests diff --git a/vendor/go.lsp.dev/protocol/diagnostics.go b/vendor/go.lsp.dev/protocol/diagnostics.go new file mode 100644 index 00000000000..6097f466e42 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/diagnostics.go @@ -0,0 +1,149 @@ +// SPDX-FileCopyrightText: 2019 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "strconv" +) + +// Diagnostic represents a diagnostic, such as a compiler error or warning. +// +// Diagnostic objects are only valid in the scope of a resource. +type Diagnostic struct { + // Range is the range at which the message applies. + Range Range `json:"range"` + + // Severity is the diagnostic's severity. Can be omitted. If omitted it is up to the + // client to interpret diagnostics as error, warning, info or hint. + Severity DiagnosticSeverity `json:"severity,omitempty"` + + // Code is the diagnostic's code, which might appear in the user interface. + Code interface{} `json:"code,omitempty"` // int32 | string; + + // CodeDescription an optional property to describe the error code. + // + // @since 3.16.0. + CodeDescription *CodeDescription `json:"codeDescription,omitempty"` + + // Source a human-readable string describing the source of this + // diagnostic, e.g. 'typescript' or 'super lint'. + Source string `json:"source,omitempty"` + + // Message is the diagnostic's message. + Message string `json:"message"` + + // Tags is the additional metadata about the diagnostic. + // + // @since 3.15.0. + Tags []DiagnosticTag `json:"tags,omitempty"` + + // RelatedInformation an array of related diagnostic information, e.g. when symbol-names within + // a scope collide all definitions can be marked via this property. 
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+
+ // Data is a data entry field that is preserved between a
+ // "textDocument/publishDiagnostics" notification and
+ // "textDocument/codeAction" request.
+ //
+ // @since 3.16.0.
+ Data interface{} `json:"data,omitempty"`
+}
+
+// DiagnosticSeverity indicates the severity of a Diagnostic message.
+type DiagnosticSeverity float64
+
+const (
+ // DiagnosticSeverityError reports an error.
+ DiagnosticSeverityError DiagnosticSeverity = 1
+
+ // DiagnosticSeverityWarning reports a warning.
+ DiagnosticSeverityWarning DiagnosticSeverity = 2
+
+ // DiagnosticSeverityInformation reports information.
+ DiagnosticSeverityInformation DiagnosticSeverity = 3
+
+ // DiagnosticSeverityHint reports a hint.
+ DiagnosticSeverityHint DiagnosticSeverity = 4
+)
+
+// String implements fmt.Stringer.
+func (d DiagnosticSeverity) String() string {
+ switch d {
+ case DiagnosticSeverityError:
+  return "Error"
+ case DiagnosticSeverityWarning:
+  return "Warning"
+ case DiagnosticSeverityInformation:
+  return "Information"
+ case DiagnosticSeverityHint:
+  return "Hint"
+ default:
+  return strconv.FormatFloat(float64(d), 'f', -10, 64)
+ }
+}
+
+// CodeDescription is the structure to capture a description for an error code.
+//
+// @since 3.16.0.
+type CodeDescription struct {
+ // Href is a URI to open with more information about the diagnostic error.
+ Href URI `json:"href"`
+}
+
+// DiagnosticTag represents a diagnostic tag.
+//
+// @since 3.15.0.
+type DiagnosticTag float64
+
+// list of DiagnosticTag.
+const (
+ // DiagnosticTagUnnecessary unused or unnecessary code.
+ //
+ // Clients are allowed to render diagnostics with this tag faded out instead of having
+ // an error squiggle.
+ DiagnosticTagUnnecessary DiagnosticTag = 1
+
+ // DiagnosticTagDeprecated deprecated or obsolete code.
+ //
+ // Clients are allowed to render diagnostics with this tag struck through.
+ DiagnosticTagDeprecated DiagnosticTag = 2
+)
+
+// String implements fmt.Stringer.
+func (d DiagnosticTag) String() string {
+ switch d {
+ case DiagnosticTagUnnecessary:
+  return "Unnecessary"
+ case DiagnosticTagDeprecated:
+  return "Deprecated"
+ default:
+  return strconv.FormatFloat(float64(d), 'f', -10, 64)
+ }
+}
+
+// DiagnosticRelatedInformation represents a related message and source code location for a diagnostic.
+//
+// This should be used to point to code locations that cause or are related to a diagnostic, e.g. when duplicating
+// a symbol in a scope.
+type DiagnosticRelatedInformation struct {
+ // Location is the location of this related diagnostic information.
+ Location Location `json:"location"`
+
+ // Message is the message of this related diagnostic information.
+ Message string `json:"message"`
+}
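+
+// Example usage (illustrative sketch; not part of the upstream API). It relies
+// on the Position, Range, and Location types defined elsewhere in this package;
+// the URI, positions, and "example-lint" source are made up for illustration:
+//
+//	diag := Diagnostic{
+//		Range:    Range{Start: Position{Line: 9, Character: 4}, End: Position{Line: 9, Character: 11}},
+//		Severity: DiagnosticSeverityError,
+//		Source:   "example-lint",
+//		Message:  "symbol redeclared in this scope",
+//		RelatedInformation: []DiagnosticRelatedInformation{{
+//			Location: Location{URI: "file:///example/main.go"},
+//			Message:  "first declaration",
+//		}},
+//	}
+//	_ = diag.Severity.String() // "Error"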
+
+// PublishDiagnosticsParams represents the params of a PublishDiagnostics notification.
+type PublishDiagnosticsParams struct {
+ // URI is the URI for which diagnostic information is reported.
+ URI DocumentURI `json:"uri"`
+
+ // Version is the optional version number of the document the diagnostics are published for.
+ //
+ // @since 3.15.
+ Version uint32 `json:"version,omitempty"`
+
+ // Diagnostics is an array of diagnostic information items.
+ Diagnostics []Diagnostic `json:"diagnostics"`
+}
diff --git a/vendor/go.lsp.dev/protocol/doc.go b/vendor/go.lsp.dev/protocol/doc.go
new file mode 100644
index 00000000000..487378392c3
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/doc.go
@@ -0,0 +1,23 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package protocol implements the Language Server Protocol specification in Go.
+//
+// This package contains the structs that map directly to the wire format
+// of the Language Server Protocol.
+//
+// It is a literal transcription, with unmodified comments, and only the changes
+// required to make it Go code.
+//
+// - Names are uppercased to export them.
+//
+// - All fields have JSON tags added to correct the names.
+//
+// - Fields marked with a ? are also marked as "omitempty".
+//
+// - Fields that are "|| null" are made pointers.
+//
+// - Fields that are "string | number" are left as string.
+//
+// - Fields that are type "number" are made float64.
+package protocol // import "go.lsp.dev/protocol"
diff --git a/vendor/go.lsp.dev/protocol/errors.go b/vendor/go.lsp.dev/protocol/errors.go
new file mode 100644
index 00000000000..56645f687ee
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/errors.go
@@ -0,0 +1,40 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import "go.lsp.dev/jsonrpc2"
+
+const (
+ // LSPReservedErrorRangeStart is the start range of LSP reserved error codes.
+ //
+ // It doesn't denote a real error code.
+ //
+ // @since 3.16.0.
+ LSPReservedErrorRangeStart jsonrpc2.Code = -32899
+
+ // CodeContentModified is the state change that invalidates the result of a request in execution.
+ //
+ // Defined by the protocol.
+ CodeContentModified jsonrpc2.Code = -32801
+
+ // CodeRequestCancelled is the cancellation error.
+ //
+ // Defined by the protocol.
+ CodeRequestCancelled jsonrpc2.Code = -32800
+
+ // LSPReservedErrorRangeEnd is the end range of LSP reserved error codes.
+ //
+ // It doesn't denote a real error code.
+ //
+ // @since 3.16.0.
+ LSPReservedErrorRangeEnd jsonrpc2.Code = -32800
+)
+
+var (
+ // ErrContentModified should be used when a state change invalidates the result of a request in execution.
+ ErrContentModified = jsonrpc2.NewError(CodeContentModified, "cancelled JSON-RPC")
+
+ // ErrRequestCancelled should be used when a request is canceled early.
+ ErrRequestCancelled = jsonrpc2.NewError(CodeRequestCancelled, "cancelled JSON-RPC")
+)
diff --git a/vendor/go.lsp.dev/protocol/general.go b/vendor/go.lsp.dev/protocol/general.go
new file mode 100644
index 00000000000..1693b0cccb0
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/general.go
@@ -0,0 +1,461 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// TraceValue represents an InitializeParams Trace mode.
+type TraceValue string
+
+// list of TraceValue.
+const (
+ // TraceOff disable tracing.
+ TraceOff TraceValue = "off"
+
+ // TraceMessage normal tracing mode.
+ TraceMessage TraceValue = "message"
+
+ // TraceVerbose verbose tracing mode.
+ TraceVerbose TraceValue = "verbose"
+)
+
+// ClientInfo information about the client.
+//
+// @since 3.15.0.
+type ClientInfo struct {
+ // Name is the name of the client as defined by the client.
+ Name string `json:"name"`
+
+ // Version is the client's version as defined by the client.
+ Version string `json:"version,omitempty"`
+}
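+
+// Example usage (illustrative sketch; not part of the upstream API). The
+// process ID, client name, and workspace URI are made up; InitializeParams is
+// defined immediately below, and ClientCapabilities elsewhere in this package:
+//
+//	params := InitializeParams{
+//		ProcessID:    12345,
+//		ClientInfo:   &ClientInfo{Name: "example-client", Version: "0.1.0"},
+//		RootURI:      DocumentURI("file:///workspace/project"),
+//		Capabilities: ClientCapabilities{},
+//		Trace:        TraceOff,
+//	}
+//	_ = params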
+
+// InitializeParams params of Initialize request.
+type InitializeParams struct {
+ WorkDoneProgressParams
+
+ // ProcessID is the process ID of the parent process that started
+ // the server. Is null if the process has not been started by another process.
+ // If the parent process is not alive then the server should exit (see exit notification) its process.
+ ProcessID int32 `json:"processId"`
+
+ // ClientInfo is the information about the client.
+ //
+ // @since 3.15.0.
+ ClientInfo *ClientInfo `json:"clientInfo,omitempty"`
+
+ // Locale is the locale the client is currently showing the user interface
+ // in. This must not necessarily be the locale of the operating
+ // system.
+ //
+ // Uses IETF language tags as the value's syntax
+ // (See https://en.wikipedia.org/wiki/IETF_language_tag)
+ //
+ // @since 3.16.0.
+ Locale string `json:"locale,omitempty"`
+
+ // RootPath is the rootPath of the workspace. Is null
+ // if no folder is open.
+ //
+ // Deprecated: Use RootURI instead.
+ RootPath string `json:"rootPath,omitempty"`
+
+ // RootURI is the rootUri of the workspace. Is null if no
+ // folder is open. If both `rootPath` and "rootUri" are set
+ // "rootUri" wins.
+ //
+ // Deprecated: Use WorkspaceFolders instead.
+ RootURI DocumentURI `json:"rootUri,omitempty"`
+
+ // InitializationOptions user provided initialization options.
+ InitializationOptions interface{} `json:"initializationOptions,omitempty"`
+
+ // Capabilities is the capabilities provided by the client (editor or tool).
+ Capabilities ClientCapabilities `json:"capabilities"`
+
+ // Trace is the initial trace setting. If omitted trace is disabled ('off').
+ Trace TraceValue `json:"trace,omitempty"`
+
+ // WorkspaceFolders is the workspace folders configured in the client when the server starts.
+ // This property is only available if the client supports workspace folders.
+ // It can be `null` if the client supports workspace folders but none are
+ // configured.
+ //
+ // @since 3.6.0.
+ WorkspaceFolders []WorkspaceFolder `json:"workspaceFolders,omitempty"`
+}
+
+// InitializeResult is the result of the Initialize request.
+type InitializeResult struct {
+ // Capabilities is the capabilities the language server provides.
+ Capabilities ServerCapabilities `json:"capabilities"`
+
+ // ServerInfo Information about the server.
+ //
+ // @since 3.15.0.
+ ServerInfo *ServerInfo `json:"serverInfo,omitempty"`
+}
+
+// LogTraceParams params of LogTrace notification.
+//
+// @since 3.16.0.
+type LogTraceParams struct {
+ // Message is the message to be logged.
+ Message string `json:"message"`
+
+ // Verbose is the additional information that can be computed if the "trace" configuration
+ // is set to "verbose".
+ Verbose TraceValue `json:"verbose,omitempty"`
+}
+
+// SetTraceParams params of SetTrace notification.
+//
+// @since 3.16.0.
+type SetTraceParams struct {
+ // Value is the new value that should be assigned to the trace setting.
+ Value TraceValue `json:"value"`
+}
+
+// FileOperationPatternKind is a pattern kind describing if a glob pattern matches a file, a folder, or
+// both.
+//
+// @since 3.16.0.
+type FileOperationPatternKind string
+
+// list of FileOperationPatternKind.
+const (
+ // FileOperationPatternKindFile is the pattern matches a file only.
+ FileOperationPatternKindFile FileOperationPatternKind = "file"
+
+ // FileOperationPatternKindFolder is the pattern matches a folder only.
+ FileOperationPatternKindFolder FileOperationPatternKind = "folder"
+)
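+
+// Example usage (illustrative sketch; not part of the upstream API) of the
+// trace notification params defined above; the message text is made up:
+//
+//	// Client asks the server for verbose tracing ("$/setTrace"):
+//	set := SetTraceParams{Value: TraceVerbose}
+//
+//	// Server then reports trace output ("$/logTrace"):
+//	log := LogTraceParams{Message: "indexing workspace"}
+//	_, _ = set, log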
+
+// FileOperationPatternOptions matching options for the file operation pattern.
+//
+// @since 3.16.0.
+type FileOperationPatternOptions struct {
+ // IgnoreCase indicates whether the pattern should be matched ignoring casing.
+ IgnoreCase bool `json:"ignoreCase,omitempty"`
+}
+
+// FileOperationPattern a pattern to describe in which file operation requests or notifications
+// the server is interested in.
+//
+// @since 3.16.0.
+type FileOperationPattern struct {
+ // Glob is the glob pattern to match. Glob patterns can have the following syntax:
+ // - `*` to match one or more characters in a path segment
+ // - `?` to match on one character in a path segment
+ // - `**` to match any number of path segments, including none
+ // - `{}` to group conditions (e.g. `**​/*.{ts,js}` matches all TypeScript
+ // and JavaScript files)
+ // - `[]` to declare a range of characters to match in a path segment
+ // (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+ // - `[!...]` to negate a range of characters to match in a path segment
+ // (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but
+ // not `example.0`)
+ Glob string `json:"glob"`
+
+ // Matches whether to match files or folders with this pattern.
+ //
+ // Matches both if undefined.
+ Matches FileOperationPatternKind `json:"matches,omitempty"`
+
+ // Options additional options used during matching.
+ Options FileOperationPatternOptions `json:"options,omitempty"`
+}
+
+// FileOperationFilter is a filter to describe in which file operation requests or notifications
+// the server is interested in.
+//
+// @since 3.16.0.
+type FileOperationFilter struct {
+ // Scheme is a URI scheme like "file" or "untitled".
+ Scheme string `json:"scheme,omitempty"`
+
+ // Pattern is the actual file operation pattern.
+ Pattern FileOperationPattern `json:"pattern"`
+}
+
+// CreateFilesParams is the parameters sent in notifications/requests for user-initiated creation
+// of files.
+//
+// @since 3.16.0.
+type CreateFilesParams struct {
+ // Files an array of all files/folders created in this operation.
+ Files []FileCreate `json:"files"`
+}
+
+// FileCreate represents information on a file/folder create.
+//
+// @since 3.16.0.
+type FileCreate struct {
+ // URI is a file:// URI for the location of the file/folder being created.
+ URI string `json:"uri"`
+}
+
+// RenameFilesParams is the parameters sent in notifications/requests for user-initiated renames
+// of files.
+//
+// @since 3.16.0.
+type RenameFilesParams struct {
+ // Files an array of all files/folders renamed in this operation. When a folder
+ // is renamed, only the folder will be included, and not its children.
+ Files []FileRename `json:"files"`
+}
+
+// FileRename represents information on a file/folder rename.
+//
+// @since 3.16.0.
+type FileRename struct {
+ // OldURI is a file:// URI for the original location of the file/folder being renamed.
+ OldURI string `json:"oldUri"`
+
+ // NewURI is a file:// URI for the new location of the file/folder being renamed.
+ NewURI string `json:"newUri"`
+}
+
+// DeleteFilesParams is the parameters sent in notifications/requests for user-initiated deletes
+// of files.
+//
+// @since 3.16.0.
+type DeleteFilesParams struct {
+ // Files an array of all files/folders deleted in this operation.
+ Files []FileDelete `json:"files"`
+}
+
+// FileDelete represents information on a file/folder delete.
+//
+// @since 3.16.0.
+type FileDelete struct {
+ // URI is a file:// URI for the location of the file/folder being deleted.
+ URI string `json:"uri"`
+}
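+
+// Example usage (illustrative sketch; not part of the upstream API). A server
+// could use a filter like this to be notified only about created Go files; the
+// glob and options are made up for illustration:
+//
+//	filter := FileOperationFilter{
+//		Scheme: "file",
+//		Pattern: FileOperationPattern{
+//			Glob:    "**/*.go",
+//			Matches: FileOperationPatternKindFile,
+//			Options: FileOperationPatternOptions{IgnoreCase: true},
+//		},
+//	}
+//	_ = filter
+
+// DocumentHighlightParams params of DocumentHighlight request.
+//
+// @since 3.15.0.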
+type DocumentHighlightParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// DeclarationParams params of Declaration request. +// +// @since 3.15.0. +type DeclarationParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// DefinitionParams params of Definition request. +// +// @since 3.15.0. +type DefinitionParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// TypeDefinitionParams params of TypeDefinition request. +// +// @since 3.15.0. +type TypeDefinitionParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// ImplementationParams params of Implementation request. +// +// @since 3.15.0. +type ImplementationParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// ShowDocumentParams params to show a document. +// +// @since 3.16.0. +type ShowDocumentParams struct { + // URI is the document uri to show. + URI URI `json:"uri"` + + // External indicates to show the resource in an external program. + // To show for example `https://code.visualstudio.com/` + // in the default WEB browser set `external` to `true`. + External bool `json:"external,omitempty"` + + // TakeFocus an optional property to indicate whether the editor + // showing the document should take focus or not. + // Clients might ignore this property if an external + // program is started. + TakeFocus bool `json:"takeFocus,omitempty"` + + // Selection an optional selection range if the document is a text + // document. Clients might ignore the property if an + // external program is started or the file is not a text + // file. + Selection *Range `json:"selection,omitempty"` +} + +// ShowDocumentResult is the result of an show document request. +// +// @since 3.16.0. +type ShowDocumentResult struct { + // Success a boolean indicating if the show was successful. + Success bool `json:"success"` +} + +// ServerInfo Information about the server. +// +// @since 3.15.0. +type ServerInfo struct { + // Name is the name of the server as defined by the server. + Name string `json:"name"` + + // Version is the server's version as defined by the server. + Version string `json:"version,omitempty"` +} + +// InitializeError known error codes for an "InitializeError". +type InitializeError struct { + // Retry indicates whether the client execute the following retry logic: + // (1) show the message provided by the ResponseError to the user + // (2) user selects retry or cancel + // (3) if user selected retry the initialize method is sent again. + Retry bool `json:"retry,omitempty"` +} + +// ReferencesOptions ReferencesProvider options. +// +// @since 3.15.0. +type ReferencesOptions struct { + WorkDoneProgressOptions +} + +// WorkDoneProgressOptions WorkDoneProgress options. +// +// @since 3.15.0. +type WorkDoneProgressOptions struct { + WorkDoneProgress bool `json:"workDoneProgress,omitempty"` +} + +// LinkedEditingRangeParams params for the LinkedEditingRange request. +// +// @since 3.16.0. +type LinkedEditingRangeParams struct { + TextDocumentPositionParams + WorkDoneProgressParams +} + +// LinkedEditingRanges result of LinkedEditingRange request. +// +// @since 3.16.0. +type LinkedEditingRanges struct { + // Ranges a list of ranges that can be renamed together. + // + // The ranges must have identical length and contain identical text content. + // + // The ranges cannot overlap. 
+ Ranges []Range `json:"ranges"` + + // WordPattern an optional word pattern (regular expression) that describes valid contents for + // the given ranges. + // + // If no pattern is provided, the client configuration's word pattern will be used. + WordPattern string `json:"wordPattern,omitempty"` +} + +// MonikerParams params for the Moniker request. +// +// @since 3.16.0. +type MonikerParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams +} + +// UniquenessLevel is the Moniker uniqueness level to define scope of the moniker. +// +// @since 3.16.0. +type UniquenessLevel string + +// list of UniquenessLevel. +const ( + // UniquenessLevelDocument is the moniker is only unique inside a document. + UniquenessLevelDocument UniquenessLevel = "document" + + // UniquenessLevelProject is the moniker is unique inside a project for which a dump got created. + UniquenessLevelProject UniquenessLevel = "project" + + // UniquenessLevelGroup is the moniker is unique inside the group to which a project belongs. + UniquenessLevelGroup UniquenessLevel = "group" + + // UniquenessLevelScheme is the moniker is unique inside the moniker scheme. + UniquenessLevelScheme UniquenessLevel = "scheme" + + // UniquenessLevelGlobal is the moniker is globally unique. + UniquenessLevelGlobal UniquenessLevel = "global" +) + +// MonikerKind is the moniker kind. +// +// @since 3.16.0. +type MonikerKind string + +// list of MonikerKind. +const ( + // MonikerKindImport is the moniker represent a symbol that is imported into a project. + MonikerKindImport MonikerKind = "import" + + // MonikerKindExport is the moniker represents a symbol that is exported from a project. + MonikerKindExport MonikerKind = "export" + + // MonikerKindLocal is the moniker represents a symbol that is local to a project (e.g. a local + // variable of a function, a class not visible outside the project, ...). + MonikerKindLocal MonikerKind = "local" +) + +// Moniker definition to match LSIF 0.5 moniker definition. +// +// @since 3.16.0. +type Moniker struct { + // Scheme is the scheme of the moniker. For example tsc or .Net. + Scheme string `json:"scheme"` + + // Identifier is the identifier of the moniker. + // + // The value is opaque in LSIF however schema owners are allowed to define the structure if they want. + Identifier string `json:"identifier"` + + // Unique is the scope in which the moniker is unique. + Unique UniquenessLevel `json:"unique"` + + // Kind is the moniker kind if known. + Kind MonikerKind `json:"kind,omitempty"` +} + +// StaticRegistrationOptions staticRegistration options to be returned in the initialize request. +type StaticRegistrationOptions struct { + // ID is the id used to register the request. The id can be used to deregister + // the request again. See also Registration#id. + ID string `json:"id,omitempty"` +} + +// DocumentLinkRegistrationOptions DocumentLinkRegistration options. +type DocumentLinkRegistrationOptions struct { + TextDocumentRegistrationOptions + + // ResolveProvider document links have a resolve provider as well. + ResolveProvider bool `json:"resolveProvider,omitempty"` +} + +// InitializedParams params of Initialized notification. +type InitializedParams struct{} + +// WorkspaceFolders represents a slice of WorkspaceFolder. 
+type WorkspaceFolders []WorkspaceFolder diff --git a/vendor/go.lsp.dev/protocol/handler.go b/vendor/go.lsp.dev/protocol/handler.go new file mode 100644 index 00000000000..ac253a246e2 --- /dev/null +++ b/vendor/go.lsp.dev/protocol/handler.go @@ -0,0 +1,88 @@ +// SPDX-FileCopyrightText: 2021 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "context" + "fmt" + + "github.com/segmentio/encoding/json" + + "go.lsp.dev/jsonrpc2" + "go.lsp.dev/pkg/xcontext" +) + +// CancelHandler handler of cancelling. +func CancelHandler(handler jsonrpc2.Handler) jsonrpc2.Handler { + handler, canceller := jsonrpc2.CancelHandler(handler) + + h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error { + if req.Method() != MethodCancelRequest { + // TODO(iancottrell): See if we can generate a reply for the request to be cancelled + // at the point of cancellation rather than waiting for gopls to naturally reply. + // To do that, we need to keep track of whether a reply has been sent already and + // be careful about racing between the two paths. + // TODO(iancottrell): Add a test that watches the stream and verifies the response + // for the cancelled request flows. + reply := func(ctx context.Context, resp interface{}, err error) error { + // https://microsoft.github.io/language-server-protocol/specifications/specification-current/#cancelRequest + if ctx.Err() != nil && err == nil { + err = ErrRequestCancelled + } + ctx = xcontext.Detach(ctx) + + return reply(ctx, resp, err) + } + + return handler(ctx, reply, req) + } + + var params CancelParams + if err := json.Unmarshal(req.Params(), ¶ms); err != nil { + return replyParseError(ctx, reply, err) + } + + switch id := params.ID.(type) { + case int32: + canceller(jsonrpc2.NewNumberID(id)) + case string: + canceller(jsonrpc2.NewStringID(id)) + default: + return replyParseError(ctx, reply, fmt.Errorf("request ID %v malformed", id)) + } + + return reply(ctx, nil, nil) + } + + return h +} + +// Handlers default jsonrpc2.Handler. +func Handlers(handler jsonrpc2.Handler) jsonrpc2.Handler { + return CancelHandler( + jsonrpc2.AsyncHandler( + jsonrpc2.ReplyHandler(handler), + ), + ) +} + +// Call calls method to params and result. +func Call(ctx context.Context, conn jsonrpc2.Conn, method string, params, result interface{}) error { + id, err := conn.Call(ctx, method, params, result) + if ctx.Err() != nil { + notifyCancel(ctx, conn, id) + } + + return err +} + +func notifyCancel(ctx context.Context, conn jsonrpc2.Conn, id jsonrpc2.ID) { + ctx = xcontext.Detach(ctx) + // Note that only *jsonrpc2.ID implements json.Marshaler. + conn.Notify(ctx, MethodCancelRequest, &CancelParams{ID: &id}) +} + +func replyParseError(ctx context.Context, reply jsonrpc2.Replier, err error) error { + return reply(ctx, nil, fmt.Errorf("%s: %w", jsonrpc2.ErrParse, err)) +} diff --git a/vendor/go.lsp.dev/protocol/language.go b/vendor/go.lsp.dev/protocol/language.go new file mode 100644 index 00000000000..221d72aa2cc --- /dev/null +++ b/vendor/go.lsp.dev/protocol/language.go @@ -0,0 +1,1316 @@ +// SPDX-FileCopyrightText: 2019 The Go Language Server Authors +// SPDX-License-Identifier: BSD-3-Clause + +package protocol + +import ( + "strconv" +) + +// CompletionParams params of Completion request. +type CompletionParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams + + // Context is the completion context. 
This is only available if the client specifies
+ // to send this using `ClientCapabilities.textDocument.completion.contextSupport === true`.
+ Context *CompletionContext `json:"context,omitempty"`
+}
+
+// CompletionTriggerKind how a completion was triggered.
+type CompletionTriggerKind float64
+
+const (
+ // CompletionTriggerKindInvoked completion was triggered by typing an identifier (24x7 code
+ // complete), manual invocation (e.g. Ctrl+Space) or via API.
+ CompletionTriggerKindInvoked CompletionTriggerKind = 1
+
+ // CompletionTriggerKindTriggerCharacter completion was triggered by a trigger character specified by
+ // the `triggerCharacters` properties of the `CompletionRegistrationOptions`.
+ CompletionTriggerKindTriggerCharacter CompletionTriggerKind = 2
+
+ // CompletionTriggerKindTriggerForIncompleteCompletions completion was re-triggered as the current completion list is incomplete.
+ CompletionTriggerKindTriggerForIncompleteCompletions CompletionTriggerKind = 3
+)
+
+// String implements fmt.Stringer.
+func (k CompletionTriggerKind) String() string {
+ switch k {
+ case CompletionTriggerKindInvoked:
+  return "Invoked"
+ case CompletionTriggerKindTriggerCharacter:
+  return "TriggerCharacter"
+ case CompletionTriggerKindTriggerForIncompleteCompletions:
+  return "TriggerForIncompleteCompletions"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// CompletionContext contains additional information about the context in which a completion request is triggered.
+type CompletionContext struct {
+ // TriggerCharacter is the trigger character (a single character) that has triggered code completion.
+ // Is undefined if `triggerKind !== CompletionTriggerKind.TriggerCharacter`.
+ TriggerCharacter string `json:"triggerCharacter,omitempty"`
+
+ // TriggerKind how the completion was triggered.
+ TriggerKind CompletionTriggerKind `json:"triggerKind"`
+}
+
+// CompletionList represents a collection of [completion items](#CompletionItem) to be presented
+// in the editor.
+type CompletionList struct {
+ // IsIncomplete indicates that this list is not complete. Further typing should result in recomputing
+ // this list.
+ IsIncomplete bool `json:"isIncomplete"`
+
+ // Items is the completion items.
+ Items []CompletionItem `json:"items"`
+}
+
+// InsertTextFormat defines whether the insert text in a completion item should be interpreted as
+// plain text or a snippet.
+type InsertTextFormat float64
+
+const (
+ // InsertTextFormatPlainText means the primary text to be inserted is treated as a plain string.
+ InsertTextFormatPlainText InsertTextFormat = 1
+
+ // InsertTextFormatSnippet means the primary text to be inserted is treated as a snippet.
+ //
+ // A snippet can define tab stops and placeholders with `$1`, `$2`
+ // and `${3:foo}`. `$0` defines the final tab stop, it defaults to
+ // the end of the snippet. Placeholders with equal identifiers are linked,
+ // that is typing in one will update others too.
+ InsertTextFormatSnippet InsertTextFormat = 2
+)
+
+// String implements fmt.Stringer.
+func (tf InsertTextFormat) String() string {
+ switch tf {
+ case InsertTextFormatPlainText:
+  return "PlainText"
+ case InsertTextFormatSnippet:
+  return "Snippet"
+ default:
+  return strconv.FormatFloat(float64(tf), 'f', -10, 64)
+ }
+}
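+
+// Example usage (illustrative sketch; not part of the upstream API) of the
+// snippet insert-text format; the label and snippet text are made up, and
+// CompletionItem is defined below in this file:
+//
+//	item := CompletionItem{
+//		Label:            "iferr",
+//		Kind:             CompletionItemKindSnippet,
+//		InsertText:       "if err != nil {\n\treturn ${1:err}\n}$0",
+//		InsertTextFormat: InsertTextFormatSnippet,
+//	}
+//	_ = item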
+
+// InsertReplaceEdit is a special text edit to provide an insert and a replace operation.
+//
+// @since 3.16.0.
+type InsertReplaceEdit struct {
+ // NewText is the string to be inserted.
+ NewText string `json:"newText"`
+
+ // Insert is the range if the insert is requested.
+ Insert Range `json:"insert"`
+
+ // Replace is the range if the replace is requested.
+ Replace Range `json:"replace"`
+}
+
+// InsertTextMode how whitespace and indentation is handled during completion
+// item insertion.
+//
+// @since 3.16.0.
+type InsertTextMode float64
+
+const (
+ // InsertTextModeAsIs means the insertion or replace string is taken as it is. If the
+ // value is multi line the lines below the cursor will be
+ // inserted using the indentation defined in the string value.
+ // The client will not apply any kind of adjustments to the
+ // string.
+ InsertTextModeAsIs InsertTextMode = 1
+
+ // InsertTextModeAdjustIndentation means the editor adjusts leading whitespace of new lines so that
+ // they match the indentation up to the cursor of the line for
+ // which the item is accepted.
+ //
+ // Consider a line like this: <2tabs><cursor><3tabs>foo. Accepting a
+ // multi-line completion item is indented using 2 tabs and all
+ // following lines inserted will be indented using 2 tabs as well.
+ InsertTextModeAdjustIndentation InsertTextMode = 2
+)
+
+// String returns a string representation of the InsertTextMode.
+func (k InsertTextMode) String() string {
+ switch k {
+ case InsertTextModeAsIs:
+  return "AsIs"
+ case InsertTextModeAdjustIndentation:
+  return "AdjustIndentation"
+ default:
+  return strconv.FormatFloat(float64(k), 'f', -10, 64)
+ }
+}
+
+// CompletionItem item of CompletionList.
+type CompletionItem struct {
+ // AdditionalTextEdits an optional array of additional text edits that are applied when
+ // selecting this completion. Edits must not overlap (including the same insert position)
+ // with the main edit nor with themselves.
+ //
+ // Additional text edits should be used to change text unrelated to the current cursor position
+ // (for example adding an import statement at the top of the file if the completion item will
+ // insert an unqualified type).
+ AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+
+ // Command an optional command that is executed *after* inserting this completion. *Note* that
+ // additional modifications to the current document should be described with the
+ // additionalTextEdits-property.
+ Command *Command `json:"command,omitempty"`
+
+ // CommitCharacters an optional set of characters that when pressed while this completion is active will accept it first and
+ // then type that character. *Note* that all commit characters should have `length=1` and that superfluous
+ // characters will be ignored.
+ CommitCharacters []string `json:"commitCharacters,omitempty"`
+
+ // Tags is the tag for this completion item.
+ //
+ // @since 3.15.0.
+ Tags []CompletionItemTag `json:"tags,omitempty"`
+
+ // Data is a data entry field that is preserved on a completion item between
+ // a completion and a completion resolve request.
+ Data interface{} `json:"data,omitempty"`
+
+ // Deprecated indicates if this item is deprecated.
+ Deprecated bool `json:"deprecated,omitempty"`
+
+ // Detail a human-readable string with additional information
+ // about this item, like type or symbol information.
+ Detail string `json:"detail,omitempty"`
+
+ // Documentation a human-readable string that represents a doc-comment.
+ Documentation interface{} `json:"documentation,omitempty"`
+
+ // FilterText a string that should be used when filtering a set of
+ // completion items. When `falsy` the label is used.
+ FilterText string `json:"filterText,omitempty"` + + // InsertText a string that should be inserted into a document when selecting + // this completion. When `falsy` the label is used. + // + // The `insertText` is subject to interpretation by the client side. + // Some tools might not take the string literally. For example + // VS Code when code complete is requested in this example `con` + // and a completion item with an `insertText` of `console` is provided it + // will only insert `sole`. Therefore it is recommended to use `textEdit` instead + // since it avoids additional client side interpretation. + InsertText string `json:"insertText,omitempty"` + + // InsertTextFormat is the format of the insert text. The format applies to both the `insertText` property + // and the `newText` property of a provided `textEdit`. + InsertTextFormat InsertTextFormat `json:"insertTextFormat,omitempty"` + + // InsertTextMode how whitespace and indentation is handled during completion + // item insertion. If not provided the client's default value depends on + // the `textDocument.completion.insertTextMode` client capability. + // + // @since 3.16.0. + InsertTextMode InsertTextMode `json:"insertTextMode,omitempty"` + + // Kind is the kind of this completion item. Based of the kind + // an icon is chosen by the editor. + Kind CompletionItemKind `json:"kind,omitempty"` + + // Label is the label of this completion item. By default + // also the text that is inserted when selecting + // this completion. + Label string `json:"label"` + + // Preselect select this item when showing. + // + // *Note* that only one completion item can be selected and that the + // tool / client decides which item that is. The rule is that the *first* + // item of those that match best is selected. + Preselect bool `json:"preselect,omitempty"` + + // SortText a string that should be used when comparing this item + // with other items. When `falsy` the label is used. + SortText string `json:"sortText,omitempty"` + + // TextEdit an edit which is applied to a document when selecting this completion. When an edit is provided the value of + // `insertText` is ignored. + // + // NOTE: The range of the edit must be a single line range and it must contain the position at which completion + // has been requested. + // + // Most editors support two different operations when accepting a completion + // item. One is to insert a completion text and the other is to replace an + // existing text with a completion text. Since this can usually not be + // predetermined by a server it can report both ranges. Clients need to + // signal support for `InsertReplaceEdits` via the + // "textDocument.completion.insertReplaceSupport" client capability + // property. + // + // NOTE 1: The text edit's range as well as both ranges from an insert + // replace edit must be a [single line] and they must contain the position + // at which completion has been requested. + // + // NOTE 2: If an "InsertReplaceEdit" is returned the edit's insert range + // must be a prefix of the edit's replace range, that means it must be + // contained and starting at the same position. + // + // @since 3.16.0 additional type "InsertReplaceEdit". + TextEdit *TextEdit `json:"textEdit,omitempty"` // *TextEdit | *InsertReplaceEdit +} + +// CompletionItemKind is the completion item kind values the client supports. When this +// property exists the client also guarantees that it will +// handle values outside its set gracefully and falls back +// to a default value when unknown. 
+// +// If this property is not present the client only supports +// the completion items kinds from `Text` to `Reference` as defined in +// the initial version of the protocol. +type CompletionItemKind float64 + +const ( + // CompletionItemKindText text completion kind. + CompletionItemKindText CompletionItemKind = 1 + // CompletionItemKindMethod method completion kind. + CompletionItemKindMethod CompletionItemKind = 2 + // CompletionItemKindFunction function completion kind. + CompletionItemKindFunction CompletionItemKind = 3 + // CompletionItemKindConstructor constructor completion kind. + CompletionItemKindConstructor CompletionItemKind = 4 + // CompletionItemKindField field completion kind. + CompletionItemKindField CompletionItemKind = 5 + // CompletionItemKindVariable variable completion kind. + CompletionItemKindVariable CompletionItemKind = 6 + // CompletionItemKindClass class completion kind. + CompletionItemKindClass CompletionItemKind = 7 + // CompletionItemKindInterface interface completion kind. + CompletionItemKindInterface CompletionItemKind = 8 + // CompletionItemKindModule module completion kind. + CompletionItemKindModule CompletionItemKind = 9 + // CompletionItemKindProperty property completion kind. + CompletionItemKindProperty CompletionItemKind = 10 + // CompletionItemKindUnit unit completion kind. + CompletionItemKindUnit CompletionItemKind = 11 + // CompletionItemKindValue value completion kind. + CompletionItemKindValue CompletionItemKind = 12 + // CompletionItemKindEnum enum completion kind. + CompletionItemKindEnum CompletionItemKind = 13 + // CompletionItemKindKeyword keyword completion kind. + CompletionItemKindKeyword CompletionItemKind = 14 + // CompletionItemKindSnippet snippet completion kind. + CompletionItemKindSnippet CompletionItemKind = 15 + // CompletionItemKindColor color completion kind. + CompletionItemKindColor CompletionItemKind = 16 + // CompletionItemKindFile file completion kind. + CompletionItemKindFile CompletionItemKind = 17 + // CompletionItemKindReference reference completion kind. + CompletionItemKindReference CompletionItemKind = 18 + // CompletionItemKindFolder folder completion kind. + CompletionItemKindFolder CompletionItemKind = 19 + // CompletionItemKindEnumMember enum member completion kind. + CompletionItemKindEnumMember CompletionItemKind = 20 + // CompletionItemKindConstant constant completion kind. + CompletionItemKindConstant CompletionItemKind = 21 + // CompletionItemKindStruct struct completion kind. + CompletionItemKindStruct CompletionItemKind = 22 + // CompletionItemKindEvent event completion kind. + CompletionItemKindEvent CompletionItemKind = 23 + // CompletionItemKindOperator operator completion kind. + CompletionItemKindOperator CompletionItemKind = 24 + // CompletionItemKindTypeParameter type parameter completion kind. + CompletionItemKindTypeParameter CompletionItemKind = 25 +) + +// String implements fmt.Stringer. 
+//nolint:cyclop +func (k CompletionItemKind) String() string { + switch k { + case CompletionItemKindText: + return "Text" + case CompletionItemKindMethod: + return "Method" + case CompletionItemKindFunction: + return "Function" + case CompletionItemKindConstructor: + return "Constructor" + case CompletionItemKindField: + return "Field" + case CompletionItemKindVariable: + return "Variable" + case CompletionItemKindClass: + return "Class" + case CompletionItemKindInterface: + return "Interface" + case CompletionItemKindModule: + return "Module" + case CompletionItemKindProperty: + return "Property" + case CompletionItemKindUnit: + return "Unit" + case CompletionItemKindValue: + return "Value" + case CompletionItemKindEnum: + return "Enum" + case CompletionItemKindKeyword: + return "Keyword" + case CompletionItemKindSnippet: + return "Snippet" + case CompletionItemKindColor: + return "Color" + case CompletionItemKindFile: + return "File" + case CompletionItemKindReference: + return "Reference" + case CompletionItemKindFolder: + return "Folder" + case CompletionItemKindEnumMember: + return "EnumMember" + case CompletionItemKindConstant: + return "Constant" + case CompletionItemKindStruct: + return "Struct" + case CompletionItemKindEvent: + return "Event" + case CompletionItemKindOperator: + return "Operator" + case CompletionItemKindTypeParameter: + return "TypeParameter" + default: + return strconv.FormatFloat(float64(k), 'f', -10, 64) + } +} + +// CompletionItemTag completion item tags are extra annotations that tweak the rendering of a completion +// item. +// +// @since 3.15.0. +type CompletionItemTag float64 + +// list of CompletionItemTag. +const ( + // CompletionItemTagDeprecated is the render a completion as obsolete, usually using a strike-out. + CompletionItemTagDeprecated CompletionItemTag = 1 +) + +// String returns a string representation of the type. +func (c CompletionItemTag) String() string { + switch c { + case CompletionItemTagDeprecated: + return "Deprecated" + default: + return strconv.FormatFloat(float64(c), 'f', -10, 64) + } +} + +// CompletionRegistrationOptions CompletionRegistration options. +type CompletionRegistrationOptions struct { + TextDocumentRegistrationOptions + + // TriggerCharacters most tools trigger completion request automatically without explicitly requesting + // it using a keyboard shortcut (e.g. Ctrl+Space). Typically they do so when the user + // starts to type an identifier. For example if the user types `c` in a JavaScript file + // code complete will automatically pop up present `console` besides others as a + // completion item. Characters that make up identifiers don't need to be listed here. + // + // If code complete should automatically be trigger on characters not being valid inside + // an identifier (for example `.` in JavaScript) list them in `triggerCharacters`. + TriggerCharacters []string `json:"triggerCharacters,omitempty"` + + // ResolveProvider is the server provides support to resolve additional + // information for a completion item. + ResolveProvider bool `json:"resolveProvider,omitempty"` +} + +// HoverParams params of Hover request. +// +// @since 3.15.0. +type HoverParams struct { + TextDocumentPositionParams + WorkDoneProgressParams +} + +// Hover is the result of a hover request. +type Hover struct { + // Contents is the hover's content + Contents MarkupContent `json:"contents"` + + // Range an optional range is a range inside a text document + // that is used to visualize a hover, e.g. by changing the background color. 
+ Range *Range `json:"range,omitempty"` +} + +// SignatureHelpParams params of SignatureHelp request. +// +// @since 3.15.0. +type SignatureHelpParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + + // context is the signature help context. + // + // This is only available if the client specifies to send this using the + // client capability `textDocument.signatureHelp.contextSupport === true`. + // + // @since 3.15.0. + Context *SignatureHelpContext `json:"context,omitempty"` +} + +// SignatureHelpTriggerKind is the how a signature help was triggered. +// +// @since 3.15.0. +type SignatureHelpTriggerKind float64 + +// list of SignatureHelpTriggerKind. +const ( + // SignatureHelpTriggerKindInvoked is the signature help was invoked manually by the user or by a command. + SignatureHelpTriggerKindInvoked SignatureHelpTriggerKind = 1 + + // SignatureHelpTriggerKindTriggerCharacter is the signature help was triggered by a trigger character. + SignatureHelpTriggerKindTriggerCharacter SignatureHelpTriggerKind = 2 + + // SignatureHelpTriggerKindContentChange is the signature help was triggered by the cursor moving or + // by the document content changing. + SignatureHelpTriggerKindContentChange SignatureHelpTriggerKind = 3 +) + +// String returns a string representation of the type. +func (s SignatureHelpTriggerKind) String() string { + switch s { + case SignatureHelpTriggerKindInvoked: + return "Invoked" + case SignatureHelpTriggerKindTriggerCharacter: + return "TriggerCharacter" + case SignatureHelpTriggerKindContentChange: + return "ContentChange" + default: + return strconv.FormatFloat(float64(s), 'f', -10, 64) + } +} + +// SignatureHelpContext is the additional information about the context in which a +// signature help request was triggered. +// +// @since 3.15.0. +type SignatureHelpContext struct { + // TriggerKind is the action that caused signature help to be triggered. + TriggerKind SignatureHelpTriggerKind `json:"triggerKind"` + + // Character that caused signature help to be triggered. + // + // This is undefined when + // TriggerKind != SignatureHelpTriggerKindTriggerCharacter + TriggerCharacter string `json:"triggerCharacter,omitempty"` + + // IsRetrigger is the `true` if signature help was already showing when it was triggered. + // + // Retriggers occur when the signature help is already active and can be + // caused by actions such as typing a trigger character, a cursor move, + // or document content changes. + IsRetrigger bool `json:"isRetrigger"` + + // ActiveSignatureHelp is the currently active SignatureHelp. + // + // The `activeSignatureHelp` has its `SignatureHelp.activeSignature` field + // updated based on the user navigating through available signatures. + ActiveSignatureHelp *SignatureHelp `json:"activeSignatureHelp,omitempty"` +} + +// SignatureHelp signature help represents the signature of something +// callable. There can be multiple signature but only one +// active and only one active parameter. +type SignatureHelp struct { + // Signatures one or more signatures. + Signatures []SignatureInformation `json:"signatures"` + + // ActiveParameter is the active parameter of the active signature. If omitted or the value + // lies outside the range of `signatures[activeSignature].parameters` + // defaults to 0 if the active signature has parameters. If + // the active signature has no parameters it is ignored. 
+ // In future version of the protocol this property might become + // mandatory to better express the active parameter if the + // active signature does have any. + ActiveParameter uint32 `json:"activeParameter,omitempty"` + + // ActiveSignature is the active signature. If omitted or the value lies outside the + // range of `signatures` the value defaults to zero or is ignored if + // `signatures.length === 0`. Whenever possible implementors should + // make an active decision about the active signature and shouldn't + // rely on a default value. + // In future version of the protocol this property might become + // mandatory to better express this. + ActiveSignature uint32 `json:"activeSignature,omitempty"` +} + +// SignatureInformation is the client supports the following `SignatureInformation` +// specific properties. +type SignatureInformation struct { + // Label is the label of this signature. Will be shown in + // the UI. + // + // @since 3.16.0. + Label string `json:"label"` + + // Documentation is the human-readable doc-comment of this signature. Will be shown + // in the UI but can be omitted. + // + // @since 3.16.0. + Documentation interface{} `json:"documentation,omitempty"` // string | *MarkupContent + + // Parameters is the parameters of this signature. + // + // @since 3.16.0. + Parameters []ParameterInformation `json:"parameters,omitempty"` + + // ActiveParameterSupport is the client supports the `activeParameter` property on + // `SignatureInformation` literal. + // + // @since 3.16.0. + ActiveParameter uint32 `json:"activeParameter,omitempty"` +} + +// ParameterInformation represents a parameter of a callable-signature. A parameter can +// have a label and a doc-comment. +type ParameterInformation struct { + // Label is the label of this parameter information. + // + // Either a string or an inclusive start and exclusive end offsets within its containing + // signature label. (see SignatureInformation.label). The offsets are based on a UTF-16 + // string representation as "Position" and "Range" does. + // + // *Note*: a label of type string should be a substring of its containing signature label. + // Its intended use case is to highlight the parameter label part in the "SignatureInformation.label". + Label string `json:"label"` // string | [uint32, uint32] + + // Documentation is the human-readable doc-comment of this parameter. Will be shown + // in the UI but can be omitted. + Documentation interface{} `json:"documentation,omitempty"` // string | MarkupContent +} + +// SignatureHelpRegistrationOptions SignatureHelp Registration options. +type SignatureHelpRegistrationOptions struct { + TextDocumentRegistrationOptions + + // TriggerCharacters is the characters that trigger signature help + // automatically. + TriggerCharacters []string `json:"triggerCharacters,omitempty"` +} + +// ReferenceParams params of References request. +// +// @since 3.15.0. +type ReferenceParams struct { + TextDocumentPositionParams + WorkDoneProgressParams + PartialResultParams + + // Context is the ReferenceParams context. + Context ReferenceContext `json:"context"` +} + +// ReferenceContext context of ReferenceParams. +type ReferenceContext struct { + // IncludeDeclaration include the declaration of the current symbol. + IncludeDeclaration bool `json:"includeDeclaration"` +} + +// DocumentHighlight a document highlight is a range inside a text document which deserves +// special attention. Usually a document highlight is visualized by changing +// the background color of its range. 
+type DocumentHighlight struct { + // Range is the range this highlight applies to. + Range Range `json:"range"` + + // Kind is the highlight kind, default is DocumentHighlightKind.Text. + Kind DocumentHighlightKind `json:"kind,omitempty"` +} + +// DocumentHighlightKind a document highlight kind. +type DocumentHighlightKind float64 + +const ( + // DocumentHighlightKindText a textual occurrence. + DocumentHighlightKindText DocumentHighlightKind = 1 + + // DocumentHighlightKindRead read-access of a symbol, like reading a variable. + DocumentHighlightKindRead DocumentHighlightKind = 2 + + // DocumentHighlightKindWrite write-access of a symbol, like writing to a variable. + DocumentHighlightKindWrite DocumentHighlightKind = 3 +) + +// String implements fmt.Stringer. +func (k DocumentHighlightKind) String() string { + switch k { + case DocumentHighlightKindText: + return "Text" + case DocumentHighlightKindRead: + return "Read" + case DocumentHighlightKindWrite: + return "Write" + default: + return strconv.FormatFloat(float64(k), 'f', -10, 64) + } +} + +// DocumentSymbolParams params of Document Symbols request. +type DocumentSymbolParams struct { + WorkDoneProgressParams + PartialResultParams + + // TextDocument is the text document. + TextDocument TextDocumentIdentifier `json:"textDocument"` +} + +// SymbolKind specific capabilities for the `SymbolKind`. +// The symbol kind values the client supports. When this +// property exists the client also guarantees that it will +// handle values outside its set gracefully and falls back +// to a default value when unknown. +// +// If this property is not present the client only supports +// the symbol kinds from `File` to `Array` as defined in +// the initial version of the protocol. +type SymbolKind float64 + +const ( + // SymbolKindFile symbol of file. + SymbolKindFile SymbolKind = 1 + // SymbolKindModule symbol of module. + SymbolKindModule SymbolKind = 2 + // SymbolKindNamespace symbol of namespace. + SymbolKindNamespace SymbolKind = 3 + // SymbolKindPackage symbol of package. + SymbolKindPackage SymbolKind = 4 + // SymbolKindClass symbol of class. + SymbolKindClass SymbolKind = 5 + // SymbolKindMethod symbol of method. + SymbolKindMethod SymbolKind = 6 + // SymbolKindProperty symbol of property. + SymbolKindProperty SymbolKind = 7 + // SymbolKindField symbol of field. + SymbolKindField SymbolKind = 8 + // SymbolKindConstructor symbol of constructor. + SymbolKindConstructor SymbolKind = 9 + // SymbolKindEnum symbol of enum. + SymbolKindEnum SymbolKind = 10 + // SymbolKindInterface symbol of interface. + SymbolKindInterface SymbolKind = 11 + // SymbolKindFunction symbol of function. + SymbolKindFunction SymbolKind = 12 + // SymbolKindVariable symbol of variable. + SymbolKindVariable SymbolKind = 13 + // SymbolKindConstant symbol of constant. + SymbolKindConstant SymbolKind = 14 + // SymbolKindString symbol of string. + SymbolKindString SymbolKind = 15 + // SymbolKindNumber symbol of number. + SymbolKindNumber SymbolKind = 16 + // SymbolKindBoolean symbol of boolean. + SymbolKindBoolean SymbolKind = 17 + // SymbolKindArray symbol of array. + SymbolKindArray SymbolKind = 18 + // SymbolKindObject symbol of object. + SymbolKindObject SymbolKind = 19 + // SymbolKindKey symbol of key. + SymbolKindKey SymbolKind = 20 + // SymbolKindNull symbol of null. + SymbolKindNull SymbolKind = 21 + // SymbolKindEnumMember symbol of enum member. + SymbolKindEnumMember SymbolKind = 22 + // SymbolKindStruct symbol of struct. 
+	SymbolKindStruct SymbolKind = 23
+	// SymbolKindEvent symbol of event.
+	SymbolKindEvent SymbolKind = 24
+	// SymbolKindOperator symbol of operator.
+	SymbolKindOperator SymbolKind = 25
+	// SymbolKindTypeParameter symbol of type parameter.
+	SymbolKindTypeParameter SymbolKind = 26
+)
+
+// String implements fmt.Stringer.
+//nolint:cyclop
+func (k SymbolKind) String() string {
+	switch k {
+	case SymbolKindFile:
+		return "File"
+	case SymbolKindModule:
+		return "Module"
+	case SymbolKindNamespace:
+		return "Namespace"
+	case SymbolKindPackage:
+		return "Package"
+	case SymbolKindClass:
+		return "Class"
+	case SymbolKindMethod:
+		return "Method"
+	case SymbolKindProperty:
+		return "Property"
+	case SymbolKindField:
+		return "Field"
+	case SymbolKindConstructor:
+		return "Constructor"
+	case SymbolKindEnum:
+		return "Enum"
+	case SymbolKindInterface:
+		return "Interface"
+	case SymbolKindFunction:
+		return "Function"
+	case SymbolKindVariable:
+		return "Variable"
+	case SymbolKindConstant:
+		return "Constant"
+	case SymbolKindString:
+		return "String"
+	case SymbolKindNumber:
+		return "Number"
+	case SymbolKindBoolean:
+		return "Boolean"
+	case SymbolKindArray:
+		return "Array"
+	case SymbolKindObject:
+		return "Object"
+	case SymbolKindKey:
+		return "Key"
+	case SymbolKindNull:
+		return "Null"
+	case SymbolKindEnumMember:
+		return "EnumMember"
+	case SymbolKindStruct:
+		return "Struct"
+	case SymbolKindEvent:
+		return "Event"
+	case SymbolKindOperator:
+		return "Operator"
+	case SymbolKindTypeParameter:
+		return "TypeParameter"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// SymbolTag symbol tags are extra annotations that tweak the rendering of a symbol.
+//
+// @since 3.16.0.
+type SymbolTag float64
+
+// list of SymbolTag.
+const (
+	// SymbolTagDeprecated render a symbol as obsolete, usually using a strike-out.
+	SymbolTagDeprecated SymbolTag = 1
+)
+
+// String returns a string representation of the SymbolTag.
+func (k SymbolTag) String() string {
+	switch k {
+	case SymbolTagDeprecated:
+		return "Deprecated"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// DocumentSymbol represents programming constructs like variables, classes, interfaces etc. that appear in a document. Document symbols can be
+// hierarchical and they have two ranges: one that encloses its definition and one that points to its most interesting range,
+// e.g. the range of an identifier.
+type DocumentSymbol struct {
+	// Name is the name of this symbol. Will be displayed in the user interface and therefore must not be
+	// an empty string or a string only consisting of white spaces.
+	Name string `json:"name"`
+
+	// Detail is more detail for this symbol, e.g. the signature of a function.
+	Detail string `json:"detail,omitempty"`
+
+	// Kind is the kind of this symbol.
+	Kind SymbolKind `json:"kind"`
+
+	// Tags for this document symbol.
+	//
+	// @since 3.16.0.
+	Tags []SymbolTag `json:"tags,omitempty"`
+
+	// Deprecated indicates if this symbol is deprecated.
+	Deprecated bool `json:"deprecated,omitempty"`
+
+	// Range is the range enclosing this symbol, not including leading/trailing whitespace but everything else,
+	// like comments. This information is typically used to determine if the client's cursor is
+	// inside the symbol to reveal the symbol in the UI.
+	Range Range `json:"range"`
+
+	// SelectionRange is the range that should be selected and revealed when this symbol is being picked, e.g. the name of a function.
+	// Must be contained by the `range`.
+	SelectionRange Range `json:"selectionRange"`
+
+	// Children children of this symbol, e.g. properties of a class.
+	Children []DocumentSymbol `json:"children,omitempty"`
+}
+
+// SymbolInformation represents information about programming constructs like variables, classes,
+// interfaces etc.
+type SymbolInformation struct {
+	// Name is the name of this symbol.
+	Name string `json:"name"`
+
+	// Kind is the kind of this symbol.
+	Kind SymbolKind `json:"kind"`
+
+	// Tags for this symbol.
+	//
+	// @since 3.16.0.
+	Tags []SymbolTag `json:"tags,omitempty"`
+
+	// Deprecated indicates if this symbol is deprecated.
+	Deprecated bool `json:"deprecated,omitempty"`
+
+	// Location is the location of this symbol. The location's range is used by a tool
+	// to reveal the location in the editor. If the symbol is selected in the
+	// tool the range's start information is used to position the cursor. So
+	// the range usually spans more than the actual symbol's name and normally
+	// includes things like visibility modifiers.
+	//
+	// The range doesn't have to denote a node range in the sense of an abstract
+	// syntax tree. It can therefore not be used to reconstruct a hierarchy of
+	// the symbols.
+	Location Location `json:"location"`
+
+	// ContainerName is the name of the symbol containing this symbol. This information is for
+	// user interface purposes (e.g. to render a qualifier in the user interface
+	// if necessary). It can't be used to re-infer a hierarchy for the document
+	// symbols.
+	ContainerName string `json:"containerName,omitempty"`
+}
+
+// CodeActionParams params for the CodeActionRequest.
+type CodeActionParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the document in which the command was invoked.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Context carrying additional information.
+	Context CodeActionContext `json:"context"`
+
+	// Range is the range for which the command was invoked.
+	Range Range `json:"range"`
+}
+
+// CodeActionKind is the set of code action kind values the client supports. When this
+// property exists, the client also guarantees that it will
+// handle values outside its set gracefully and fall back
+// to a default value when unknown.
+type CodeActionKind string
+
+// A set of predefined code action kinds.
+const (
+	// QuickFix base kind for quickfix actions: 'quickfix'.
+	QuickFix CodeActionKind = "quickfix"
+
+	// Refactor base kind for refactoring actions: 'refactor'.
+	Refactor CodeActionKind = "refactor"
+
+	// RefactorExtract base kind for refactoring extraction actions: 'refactor.extract'
+	//
+	// Example extract actions:
+	//
+	// - Extract method
+	// - Extract function
+	// - Extract variable
+	// - Extract interface from class
+	// - ...
+	RefactorExtract CodeActionKind = "refactor.extract"
+
+	// RefactorInline base kind for refactoring inline actions: 'refactor.inline'
+	//
+	// Example inline actions:
+	//
+	// - Inline function
+	// - Inline variable
+	// - Inline constant
+	// - ...
+	RefactorInline CodeActionKind = "refactor.inline"
+
+	// RefactorRewrite base kind for refactoring rewrite actions: 'refactor.rewrite'
+	//
+	// Example rewrite actions:
+	//
+	// - Convert JavaScript function to class
+	// - Add or remove parameter
+	// - Encapsulate field
+	// - Make method static
+	// - Move method to base class
+	// - ...
+	RefactorRewrite CodeActionKind = "refactor.rewrite"
+
+	// Source base kind for source actions: `source`
+	//
+	// Source code actions apply to the entire file.
+	Source CodeActionKind = "source"
+
+	// SourceOrganizeImports base kind for an organize imports source action: `source.organizeImports`.
+	SourceOrganizeImports CodeActionKind = "source.organizeImports"
+)
+
+// CodeActionContext contains additional diagnostic information about the context in which
+// a code action is run.
+type CodeActionContext struct {
+	// Diagnostics is an array of diagnostics.
+	Diagnostics []Diagnostic `json:"diagnostics"`
+
+	// Only requested kind of actions to return.
+	//
+	// Actions not of this kind are filtered out by the client before being shown. So servers
+	// can omit computing them.
+	Only []CodeActionKind `json:"only,omitempty"`
+}
+
+// CodeAction capabilities specific to `textDocument/codeAction`.
+type CodeAction struct {
+	// Title is a short, human-readable, title for this code action.
+	Title string `json:"title"`
+
+	// Kind is the kind of the code action.
+	//
+	// Used to filter code actions.
+	Kind CodeActionKind `json:"kind,omitempty"`
+
+	// Diagnostics is the diagnostics that this code action resolves.
+	Diagnostics []Diagnostic `json:"diagnostics,omitempty"`
+
+	// IsPreferred marks this as a preferred action. Preferred actions are used by the `auto fix` command and can be targeted
+	// by keybindings.
+	//
+	// A quick fix should be marked preferred if it properly addresses the underlying error.
+	// A refactoring should be marked preferred if it is the most reasonable choice of actions to take.
+	//
+	// @since 3.15.0.
+	IsPreferred bool `json:"isPreferred,omitempty"`
+
+	// Disabled marks that the code action cannot currently be applied.
+	//
+	// Clients should follow the following guidelines regarding disabled code
+	// actions:
+	//
+	// - Disabled code actions are not shown in automatic lightbulb code
+	//   action menus.
+	//
+	// - Disabled actions are shown as faded out in the code action menu when
+	//   the user requests a more specific type of code action, such as
+	//   refactorings.
+	//
+	// - If the user has a keybinding that auto applies a code action and only
+	//   disabled code actions are returned, the client should show the user
+	//   an error message with `reason` in the editor.
+	//
+	// @since 3.16.0.
+	Disabled *CodeActionDisable `json:"disabled,omitempty"`
+
+	// Edit is the workspace edit this code action performs.
+	Edit *WorkspaceEdit `json:"edit,omitempty"`
+
+	// Command is a command this code action executes. If a code action
+	// provides an edit and a command, first the edit is
+	// executed and then the command.
+	Command *Command `json:"command,omitempty"`
+
+	// Data is a data entry field that is preserved on a code action between
+	// a "textDocument/codeAction" and a "codeAction/resolve" request.
+	//
+	// @since 3.16.0.
+	Data interface{} `json:"data,omitempty"`
+}
+
+// CodeActionDisable is the disabled information of a CodeAction.
+//
+// @since 3.16.0.
+type CodeActionDisable struct {
+	// Reason is a human-readable description of why the code action is currently
+	// disabled.
+	//
+	// This is displayed in the code actions UI.
+	Reason string `json:"reason"`
+}
+
+// CodeActionRegistrationOptions CodeAction registration options.
+type CodeActionRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	CodeActionOptions
+}
+
+// CodeLensParams params of Code Lens request.
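+//
+// A minimal usage sketch (an editorial illustration, not part of the upstream
+// docs); ctx, docURI and server are assumed to be in scope, server being a
+// Server obtained from ServerDispatcher:
+//
+//	lenses, err := server.CodeLens(ctx, &CodeLensParams{
+//		TextDocument: TextDocumentIdentifier{URI: docURI},
+//	})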
+type CodeLensParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the document to request code lens for.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// CodeLens represents a command that should be shown along with
+// source text, like the number of references, a way to run tests, etc.
+//
+// A code lens is _unresolved_ when no command is associated to it. For performance
+// reasons the creation of a code lens and resolving should be done in two stages.
+type CodeLens struct {
+	// Range is the range in which this code lens is valid. Should only span a single line.
+	Range Range `json:"range"`
+
+	// Command is the command this code lens represents.
+	Command *Command `json:"command,omitempty"`
+
+	// Data is a data entry field that is preserved on a code lens item between
+	// a code lens and a code lens resolve request.
+	Data interface{} `json:"data,omitempty"`
+}
+
+// CodeLensRegistrationOptions CodeLens Registration options.
+type CodeLensRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// ResolveProvider code lens has a resolve provider as well.
+	ResolveProvider bool `json:"resolveProvider,omitempty"`
+}
+
+// DocumentLinkParams params of Document Link request.
+type DocumentLinkParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the document to provide document links for.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// DocumentLink is a range in a text document that links to an internal or external resource, like another
+// text document or a web site.
+type DocumentLink struct {
+	// Range is the range this link applies to.
+	Range Range `json:"range"`
+
+	// Target is the uri this link points to. If missing a resolve request is sent later.
+	Target DocumentURI `json:"target,omitempty"`
+
+	// Tooltip is the tooltip text when you hover over this link.
+	//
+	// If a tooltip is provided, it will be displayed in a string that includes instructions on how to
+	// trigger the link, such as `{0} (ctrl + click)`. The specific instructions vary depending on OS,
+	// user settings, and localization.
+	//
+	// @since 3.15.0.
+	Tooltip string `json:"tooltip,omitempty"`
+
+	// Data is a data entry field that is preserved on a document link between a
+	// DocumentLinkRequest and a DocumentLinkResolveRequest.
+	Data interface{} `json:"data,omitempty"`
+}
+
+// DocumentColorParams params of Document Color request.
+type DocumentColorParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// ColorInformation response of Document Color request.
+type ColorInformation struct {
+	// Range is the range in the document where this color appears.
+	Range Range `json:"range"`
+
+	// Color is the actual color value for this color range.
+	Color Color `json:"color"`
+}
+
+// Color represents a color in RGBA space.
+type Color struct {
+	// Alpha is the alpha component of this color in the range [0-1].
+	Alpha float64 `json:"alpha"`
+
+	// Blue is the blue component of this color in the range [0-1].
+	Blue float64 `json:"blue"`
+
+	// Green is the green component of this color in the range [0-1].
+	Green float64 `json:"green"`
+
+	// Red is the red component of this color in the range [0-1].
+	Red float64 `json:"red"`
+}
+
+// ColorPresentationParams params of Color Presentation request.
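+//
+// Editorial sketch: asking for presentations of pure red at a given range;
+// docURI and colorRange are assumed to come from an earlier DocumentColor
+// response:
+//
+//	presentations, err := server.ColorPresentation(ctx, &ColorPresentationParams{
+//		TextDocument: TextDocumentIdentifier{URI: docURI},
+//		Color:        Color{Red: 1, Alpha: 1},
+//		Range:        colorRange,
+//	})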
+type ColorPresentationParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Color is the color information to request presentations for.
+	Color Color `json:"color"`
+
+	// Range is the range where the color would be inserted. Serves as a context.
+	Range Range `json:"range"`
+}
+
+// ColorPresentation response of Color Presentation request.
+type ColorPresentation struct {
+	// Label is the label of this color presentation. It will be shown on the color
+	// picker header. By default this is also the text that is inserted when selecting
+	// this color presentation.
+	Label string `json:"label"`
+
+	// TextEdit an edit which is applied to a document when selecting
+	// this presentation for the color. When `falsy` the label is used.
+	TextEdit *TextEdit `json:"textEdit,omitempty"`
+
+	// AdditionalTextEdits an optional array of additional [text edits](#TextEdit) that are applied when
+	// selecting this color presentation. Edits must not overlap with the main [edit](#ColorPresentation.textEdit) nor with themselves.
+	AdditionalTextEdits []TextEdit `json:"additionalTextEdits,omitempty"`
+}
+
+// DocumentFormattingParams params of Document Formatting request.
+type DocumentFormattingParams struct {
+	WorkDoneProgressParams
+
+	// Options is the format options.
+	Options FormattingOptions `json:"options"`
+
+	// TextDocument is the document to format.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// FormattingOptions value-object describing what options formatting should use.
+type FormattingOptions struct {
+	// InsertSpaces prefer spaces over tabs.
+	InsertSpaces bool `json:"insertSpaces"`
+
+	// TabSize size of a tab in spaces.
+	TabSize uint32 `json:"tabSize"`
+
+	// TrimTrailingWhitespace trim trailing whitespace on a line.
+	//
+	// @since 3.15.0.
+	TrimTrailingWhitespace bool `json:"trimTrailingWhitespace,omitempty"`
+
+	// InsertFinalNewline insert a newline character at the end of the file if one does not exist.
+	//
+	// @since 3.15.0.
+	InsertFinalNewline bool `json:"insertFinalNewline,omitempty"`
+
+	// TrimFinalNewlines trim all newlines after the final newline at the end of the file.
+	//
+	// @since 3.15.0.
+	TrimFinalNewlines bool `json:"trimFinalNewlines,omitempty"`
+
+	// Key is the signature for further properties.
+	Key map[string]interface{} `json:"key,omitempty"` // bool | int32 | string
+}
+
+// DocumentRangeFormattingParams params of Document Range Formatting request.
+type DocumentRangeFormattingParams struct {
+	WorkDoneProgressParams
+
+	// TextDocument is the document to format.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Range is the range to format.
+	Range Range `json:"range"`
+
+	// Options is the format options.
+	Options FormattingOptions `json:"options"`
+}
+
+// DocumentOnTypeFormattingParams params of Document on Type Formatting request.
+type DocumentOnTypeFormattingParams struct {
+	// TextDocument is the document to format.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Position is the position at which this request was sent.
+	Position Position `json:"position"`
+
+	// Ch is the character that has been typed.
+	Ch string `json:"ch"`
+
+	// Options is the format options.
+	Options FormattingOptions `json:"options"`
+}
+
+// DocumentOnTypeFormattingRegistrationOptions DocumentOnTypeFormatting Registration options.
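+//
+// Editorial sketch: registering "}" as the primary trigger and ";" as an
+// additional one (the trigger characters are illustrative):
+//
+//	opts := DocumentOnTypeFormattingRegistrationOptions{
+//		FirstTriggerCharacter: "}",
+//		MoreTriggerCharacter:  []string{";"},
+//	}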
+type DocumentOnTypeFormattingRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// FirstTriggerCharacter a character on which formatting should be triggered, like `}`.
+	FirstTriggerCharacter string `json:"firstTriggerCharacter"`
+
+	// MoreTriggerCharacter more trigger characters.
+	MoreTriggerCharacter []string `json:"moreTriggerCharacter"`
+}
+
+// RenameParams params of Rename request.
+type RenameParams struct {
+	TextDocumentPositionParams
+	PartialResultParams
+
+	// NewName is the new name of the symbol. If the given name is not valid the
+	// request must return a [ResponseError](#ResponseError) with an
+	// appropriate message set.
+	NewName string `json:"newName"`
+}
+
+// RenameRegistrationOptions Rename Registration options.
+type RenameRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// PrepareProvider indicates that renames should be checked and tested for validity before being executed.
+	PrepareProvider bool `json:"prepareProvider,omitempty"`
+}
+
+// PrepareRenameParams params of PrepareRenameParams request.
+//
+// @since 3.15.0.
+type PrepareRenameParams struct {
+	TextDocumentPositionParams
+}
+
+// FoldingRangeParams params of Folding Range request.
+type FoldingRangeParams struct {
+	TextDocumentPositionParams
+	PartialResultParams
+}
+
+// FoldingRangeKind is the enum of known range kinds.
+type FoldingRangeKind string
+
+const (
+	// CommentFoldingRange is the folding range for a comment.
+	CommentFoldingRange FoldingRangeKind = "comment"
+
+	// ImportsFoldingRange is the folding range for imports or includes.
+	ImportsFoldingRange FoldingRangeKind = "imports"
+
+	// RegionFoldingRange is the folding range for a region (e.g. `#region`).
+	RegionFoldingRange FoldingRangeKind = "region"
+)
+
+// FoldingRange capabilities specific to `textDocument/foldingRange` requests.
+//
+// @since 3.10.0.
+type FoldingRange struct {
+	// StartLine is the zero-based line number from where the folded range starts.
+	StartLine uint32 `json:"startLine"`
+
+	// StartCharacter is the zero-based character offset from where the folded range starts. If not defined, defaults to the length of the start line.
+	StartCharacter uint32 `json:"startCharacter,omitempty"`
+
+	// EndLine is the zero-based line number where the folded range ends.
+	EndLine uint32 `json:"endLine"`
+
+	// EndCharacter is the zero-based character offset before the folded range ends. If not defined, defaults to the length of the end line.
+	EndCharacter uint32 `json:"endCharacter,omitempty"`
+
+	// Kind describes the kind of the folding range such as 'comment' or 'region'. The kind
+	// is used to categorize folding ranges and used by commands like 'Fold all comments'.
+	// See FoldingRangeKind for an enumeration of standardized kinds.
+	Kind FoldingRangeKind `json:"kind,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/log.go b/vendor/go.lsp.dev/protocol/log.go
new file mode 100644
index 00000000000..0b7c9aafc69
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/log.go
@@ -0,0 +1,156 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"sync"
+	"time"
+
+	"go.lsp.dev/jsonrpc2"
+)
+
+// loggingStream is a jsonrpc2.Stream wrapper that logs LSP protocol traffic.
+type loggingStream struct {
+	stream jsonrpc2.Stream
+	log    io.Writer
+	logMu  sync.Mutex
+}
+
+// LoggingStream returns a stream that does LSP protocol logging.
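+//
+// Editorial sketch: mirror all traffic to os.Stderr before handing the
+// stream to a connection (rwc is any io.ReadWriteCloser, e.g. a net.Conn):
+//
+//	stream := LoggingStream(jsonrpc2.NewStream(rwc), os.Stderr)
+//	conn := jsonrpc2.NewConn(stream)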
+func LoggingStream(stream jsonrpc2.Stream, w io.Writer) jsonrpc2.Stream {
+	return &loggingStream{
+		stream: stream,
+		log:    w,
+	}
+}
+
+// Read implements jsonrpc2.Stream.Read.
+func (s *loggingStream) Read(ctx context.Context) (jsonrpc2.Message, int64, error) {
+	msg, count, err := s.stream.Read(ctx)
+	if err == nil {
+		s.logCommon(msg, true)
+	}
+
+	return msg, count, err
+}
+
+// Write implements jsonrpc2.Stream.Write.
+func (s *loggingStream) Write(ctx context.Context, msg jsonrpc2.Message) (int64, error) {
+	s.logCommon(msg, false)
+	count, err := s.stream.Write(ctx, msg)
+
+	return count, err
+}
+
+// Close implements jsonrpc2.Stream.Close.
+func (s *loggingStream) Close() error {
+	return s.stream.Close()
+}
+
+type req struct {
+	method string
+	start  time.Time
+}
+
+type mapped struct {
+	mu          sync.Mutex
+	clientCalls map[string]req
+	serverCalls map[string]req
+}
+
+var maps = &mapped{
+	mu:          sync.Mutex{},
+	clientCalls: make(map[string]req),
+	serverCalls: make(map[string]req),
+}
+
+// these 4 methods are each used exactly once, but it seemed
+// better to have the encapsulation rather than ad hoc mutex
+// code in 4 places.
+func (m *mapped) client(id string) req {
+	m.mu.Lock()
+	v := m.clientCalls[id]
+	delete(m.clientCalls, id)
+	m.mu.Unlock()
+
+	return v
+}
+
+func (m *mapped) server(id string) req {
+	m.mu.Lock()
+	v := m.serverCalls[id]
+	delete(m.serverCalls, id)
+	m.mu.Unlock()
+
+	return v
+}
+
+func (m *mapped) setClient(id string, r req) {
+	m.mu.Lock()
+	m.clientCalls[id] = r
+	m.mu.Unlock()
+}
+
+func (m *mapped) setServer(id string, r req) {
+	m.mu.Lock()
+	m.serverCalls[id] = r
+	m.mu.Unlock()
+}
+
+const eor = "\r\n\r\n\r\n"
+
+func (s *loggingStream) logCommon(msg jsonrpc2.Message, isRead bool) {
+	if msg == nil || s.log == nil {
+		return
+	}
+
+	s.logMu.Lock()
+
+	direction, pastTense := "Received", "Received"
+	get, set := maps.client, maps.setServer
+	if isRead {
+		direction, pastTense = "Sending", "Sent"
+		get, set = maps.server, maps.setClient
+	}
+
+	tm := time.Now()
+	tmfmt := tm.Format("15:04:05.000 PM")
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "[Trace - %s] ", tmfmt) // common beginning
+
+	switch msg := msg.(type) {
+	case *jsonrpc2.Call:
+		id := fmt.Sprint(msg.ID())
+		fmt.Fprintf(&buf, "%s request '%s - (%s)'.\n", direction, msg.Method(), id)
+		fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor)
+		set(id, req{method: msg.Method(), start: tm})
+
+	case *jsonrpc2.Notification:
+		fmt.Fprintf(&buf, "%s notification '%s'.\n", direction, msg.Method())
+		fmt.Fprintf(&buf, "Params: %s%s", msg.Params(), eor)
+
+	case *jsonrpc2.Response:
+		id := fmt.Sprint(msg.ID())
+		if err := msg.Err(); err != nil {
+			fmt.Fprintf(s.log, "[Error - %s] %s #%s %s%s", pastTense, tmfmt, id, err, eor)
+			// release the mutex taken above; returning without this would
+			// deadlock the next log call.
+			s.logMu.Unlock()
+
+			return
+		}
+
+		cc := get(id)
+		elapsed := tm.Sub(cc.start)
+		fmt.Fprintf(&buf, "%s response '%s - (%s)' in %dms.\n",
+			direction, cc.method, id, elapsed/time.Millisecond)
+		fmt.Fprintf(&buf, "Result: %s%s", msg.Result(), eor)
+	}
+
+	s.log.Write(buf.Bytes())
+
+	s.logMu.Unlock()
+}
diff --git a/vendor/go.lsp.dev/protocol/progress.go b/vendor/go.lsp.dev/protocol/progress.go
new file mode 100644
index 00000000000..d1a2e9f6d7a
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/progress.go
@@ -0,0 +1,119 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// WorkDoneProgressKind kind of WorkDoneProgress.
+//
+// @since 3.15.0.
+type WorkDoneProgressKind string
+
+// list of WorkDoneProgressKind.
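+//
+// A full reporting cycle (editorial sketch) sends one begin, any number of
+// report, and one end payload over "$/progress":
+//
+//	begin := WorkDoneProgressBegin{Kind: WorkDoneProgressKindBegin, Title: "Indexing"}
+//	report := WorkDoneProgressReport{Kind: WorkDoneProgressKindReport, Percentage: 50}
+//	end := WorkDoneProgressEnd{Kind: WorkDoneProgressKindEnd, Message: "done"}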
+const (
+	// WorkDoneProgressKindBegin kind of WorkDoneProgressBegin.
+	WorkDoneProgressKindBegin WorkDoneProgressKind = "begin"
+
+	// WorkDoneProgressKindReport kind of WorkDoneProgressReport.
+	WorkDoneProgressKindReport WorkDoneProgressKind = "report"
+
+	// WorkDoneProgressKindEnd kind of WorkDoneProgressEnd.
+	WorkDoneProgressKindEnd WorkDoneProgressKind = "end"
+)
+
+// WorkDoneProgressBegin is the payload used to start progress reporting in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressBegin struct {
+	// Kind is the kind of WorkDoneProgressBegin.
+	//
+	// It must be WorkDoneProgressKindBegin.
+	Kind WorkDoneProgressKind `json:"kind"`
+
+	// Title mandatory title of the progress operation. Used to briefly inform about
+	// the kind of operation being performed.
+	//
+	// Examples: "Indexing" or "Linking dependencies".
+	Title string `json:"title"`
+
+	// Cancellable controls if a cancel button should show to allow the user to cancel the
+	// long running operation. Clients that don't support cancellation are allowed
+	// to ignore the setting.
+	Cancellable bool `json:"cancellable,omitempty"`
+
+	// Message is an optional, more detailed progress message. Contains
+	// complementary information to the `title`.
+	//
+	// Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+	// If unset, the previous progress message (if any) is still valid.
+	Message string `json:"message,omitempty"`
+
+	// Percentage is an optional progress percentage to display (value 100 is considered 100%).
+	// If not provided, infinite progress is assumed and clients are allowed
+	// to ignore the `percentage` value in subsequent report notifications.
+	//
+	// The value should be steadily rising. Clients are free to ignore values
+	// that are not following this rule.
+	Percentage uint32 `json:"percentage,omitempty"`
+}
+
+// WorkDoneProgressReport is the payload used to report progress in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressReport struct {
+	// Kind is the kind of WorkDoneProgressReport.
+	//
+	// It must be WorkDoneProgressKindReport.
+	Kind WorkDoneProgressKind `json:"kind"`
+
+	// Cancellable controls enablement state of a cancel button.
+	//
+	// Clients that don't support cancellation or don't support controlling the button's
+	// enablement state are allowed to ignore the property.
+	Cancellable bool `json:"cancellable,omitempty"`
+
+	// Message is an optional, more detailed progress message. Contains
+	// complementary information to the `title`.
+	//
+	// Examples: "3/25 files", "project/src/module2", "node_modules/some_dep".
+	// If unset, the previous progress message (if any) is still valid.
+	Message string `json:"message,omitempty"`
+
+	// Percentage is an optional progress percentage to display (value 100 is considered 100%).
+	// If not provided, infinite progress is assumed and clients are allowed
+	// to ignore the `percentage` value in subsequent report notifications.
+	//
+	// The value should be steadily rising. Clients are free to ignore values
+	// that are not following this rule.
+	Percentage uint32 `json:"percentage,omitempty"`
+}
+
+// WorkDoneProgressEnd is the payload used to signal the end of progress reporting in a "$/progress" notification.
+//
+// @since 3.15.0.
+type WorkDoneProgressEnd struct {
+	// Kind is the kind of WorkDoneProgressEnd.
+	//
+	// It must be WorkDoneProgressKindEnd.
+	Kind WorkDoneProgressKind `json:"kind"`
+
+	// Message is an optional final message, for example indicating the outcome
+	// of the operation.
+	Message string `json:"message,omitempty"`
+}
+
+// WorkDoneProgressParams is a parameter literal used to pass a work done progress token.
+//
+// @since 3.15.0.
+type WorkDoneProgressParams struct {
+	// WorkDoneToken an optional token that a server can use to report work done progress.
+	WorkDoneToken *ProgressToken `json:"workDoneToken,omitempty"`
+}
+
+// PartialResultParams is the parameter literal used to pass a partial result token.
+//
+// @since 3.15.0.
+type PartialResultParams struct {
+	// PartialResultToken an optional token that a server can use to report partial results
+	// (for example, streaming) to the client.
+	PartialResultToken *ProgressToken `json:"partialResultToken,omitempty"`
+}
diff --git a/vendor/go.lsp.dev/protocol/protocol.go b/vendor/go.lsp.dev/protocol/protocol.go
new file mode 100644
index 00000000000..7d90b255573
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/protocol.go
@@ -0,0 +1,42 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+
+	"go.lsp.dev/jsonrpc2"
+)
+
+// NewServer returns the context in which the client is embedded, the jsonrpc2.Conn, and the Client.
+func NewServer(ctx context.Context, server Server, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Client) {
+	conn := jsonrpc2.NewConn(stream)
+	client := ClientDispatcher(conn, logger.Named("client"))
+	ctx = WithClient(ctx, client)
+
+	conn.Go(ctx,
+		Handlers(
+			ServerHandler(server, jsonrpc2.MethodNotFoundHandler),
+		),
+	)
+
+	return ctx, conn, client
+}
+
+// NewClient returns the context in which the Client is embedded, the jsonrpc2.Conn, and the Server.
+func NewClient(ctx context.Context, client Client, stream jsonrpc2.Stream, logger *zap.Logger) (context.Context, jsonrpc2.Conn, Server) {
+	ctx = WithClient(ctx, client)
+
+	conn := jsonrpc2.NewConn(stream)
+	conn.Go(ctx,
+		Handlers(
+			ClientHandler(client, jsonrpc2.MethodNotFoundHandler),
+		),
+	)
+	server := ServerDispatcher(conn, logger.Named("server"))
+
+	return ctx, conn, server
+}
diff --git a/vendor/go.lsp.dev/protocol/registration.go b/vendor/go.lsp.dev/protocol/registration.go
new file mode 100644
index 00000000000..a2abb43801a
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/registration.go
@@ -0,0 +1,44 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// Registration general parameters to register for a capability.
+type Registration struct {
+	// ID is the id used to register the request. The id can be used to deregister
+	// the request again.
+	ID string `json:"id"`
+
+	// Method is the method / capability to register for.
+	Method string `json:"method"`
+
+	// RegisterOptions options necessary for the registration.
+	RegisterOptions interface{} `json:"registerOptions,omitempty"`
+}
+
+// RegistrationParams params of Register Capability.
+type RegistrationParams struct {
+	Registrations []Registration `json:"registrations"`
+}
+
+// TextDocumentRegistrationOptions TextDocumentRegistration options.
+type TextDocumentRegistrationOptions struct {
+	// DocumentSelector a document selector to identify the scope of the registration. If set to null
+	// the document selector provided on the client side will be used.
+	DocumentSelector DocumentSelector `json:"documentSelector"`
+}
+
+// Unregistration general parameters to unregister a capability.
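+//
+// Editorial sketch: unregistering a capability by the id used when it was
+// registered (id and method are illustrative); note that the params type
+// below keeps the LSP specification's own "Unregisterations" spelling:
+//
+//	params := UnregistrationParams{
+//		Unregisterations: []Unregistration{
+//			{ID: "watched-files-1", Method: "workspace/didChangeWatchedFiles"},
+//		},
+//	}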
+type Unregistration struct {
+	// ID is the id used to unregister the request or notification. Usually an id
+	// provided during the register request.
+	ID string `json:"id"`
+
+	// Method is the method / capability to unregister for.
+	Method string `json:"method"`
+}
+
+// UnregistrationParams params of Unregistration.
+type UnregistrationParams struct {
+	Unregisterations []Unregistration `json:"unregisterations"`
+}
diff --git a/vendor/go.lsp.dev/protocol/selectionrange.go b/vendor/go.lsp.dev/protocol/selectionrange.go
new file mode 100644
index 00000000000..c4cd16913af
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/selectionrange.go
@@ -0,0 +1,110 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// SelectionRangeProviderOptions selection range provider options interface.
+type SelectionRangeProviderOptions interface{}
+
+// SelectionRange represents a part of a selection hierarchy.
+//
+// A selection range may have a parent selection range that contains it.
+//
+// @since 3.15.0.
+type SelectionRange struct {
+	// Range is the Range of this selection range.
+	Range Range `json:"range"`
+
+	// Parent is the parent selection range containing this range. Therefore `parent.range` must contain this Range.
+	Parent *SelectionRange `json:"parent,omitempty"`
+}
+
+// EnableSelectionRange is a boolean that enables or disables selection range support.
+type EnableSelectionRange bool
+
+// compile time check whether the EnableSelectionRange implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*EnableSelectionRange)(nil)
+
+// Value implements SelectionRangeProviderOptions interface.
+func (v EnableSelectionRange) Value() interface{} {
+	return bool(v)
+}
+
+// NewEnableSelectionRange returns the new EnableSelectionRange underlying types SelectionRangeProviderOptions.
+func NewEnableSelectionRange(enable bool) SelectionRangeProviderOptions {
+	v := EnableSelectionRange(enable)
+
+	return &v
+}
+
+// SelectionRangeOptions is the server capability of selection range.
+type SelectionRangeOptions struct {
+	WorkDoneProgressOptions
+}
+
+// compile time check whether the SelectionRangeOptions implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*SelectionRangeOptions)(nil)
+
+// Value implements SelectionRangeProviderOptions interface.
+func (v *SelectionRangeOptions) Value() interface{} {
+	return v
+}
+
+// NewSelectionRangeOptions returns the new SelectionRangeOptions underlying types SelectionRangeProviderOptions.
+func NewSelectionRangeOptions(enableWorkDoneProgress bool) SelectionRangeProviderOptions {
+	v := SelectionRangeOptions{
+		WorkDoneProgressOptions: WorkDoneProgressOptions{
+			WorkDoneProgress: enableWorkDoneProgress,
+		},
+	}
+
+	return &v
+}
+
+// SelectionRangeRegistrationOptions is the server capability of selection range registration.
+type SelectionRangeRegistrationOptions struct {
+	SelectionRangeOptions
+	TextDocumentRegistrationOptions
+	StaticRegistrationOptions
+}
+
+// compile time check whether the SelectionRangeRegistrationOptions implements a SelectionRangeProviderOptions interface.
+var _ SelectionRangeProviderOptions = (*SelectionRangeRegistrationOptions)(nil)
+
+// Value implements SelectionRangeProviderOptions interface.
+func (v *SelectionRangeRegistrationOptions) Value() interface{} {
+	return v
+}
+
+// NewSelectionRangeRegistrationOptions returns the new SelectionRangeRegistrationOptions underlying types SelectionRangeProviderOptions.
+func NewSelectionRangeRegistrationOptions(enableWorkDoneProgress bool, selector DocumentSelector, id string) SelectionRangeProviderOptions {
+	v := SelectionRangeRegistrationOptions{
+		SelectionRangeOptions: SelectionRangeOptions{
+			WorkDoneProgressOptions: WorkDoneProgressOptions{
+				WorkDoneProgress: enableWorkDoneProgress,
+			},
+		},
+		TextDocumentRegistrationOptions: TextDocumentRegistrationOptions{
+			DocumentSelector: selector,
+		},
+		StaticRegistrationOptions: StaticRegistrationOptions{
+			ID: id,
+		},
+	}
+
+	return &v
+}
+
+// SelectionRangeParams represents a parameter literal used in selection range requests.
+//
+// @since 3.15.0.
+type SelectionRangeParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Positions is the positions inside the text document.
+	Positions []Position `json:"positions"`
+}
diff --git a/vendor/go.lsp.dev/protocol/semantic_token.go b/vendor/go.lsp.dev/protocol/semantic_token.go
new file mode 100644
index 00000000000..c2d1f3a4db5
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/semantic_token.go
@@ -0,0 +1,179 @@
+// SPDX-FileCopyrightText: 2021 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// SemanticTokenTypes represents a type of semantic token.
+//
+// @since 3.16.0.
+type SemanticTokenTypes string
+
+// list of SemanticTokenTypes.
+const (
+	SemanticTokenNamespace SemanticTokenTypes = "namespace"
+
+	// SemanticTokenType represents a generic type. Acts as a fallback for types which
+	// can't be mapped to a specific type like class or enum.
+	SemanticTokenType          SemanticTokenTypes = "type"
+	SemanticTokenClass         SemanticTokenTypes = "class"
+	SemanticTokenEnum          SemanticTokenTypes = "enum"
+	SemanticTokenInterface     SemanticTokenTypes = "interface"
+	SemanticTokenStruct        SemanticTokenTypes = "struct"
+	SemanticTokenTypeParameter SemanticTokenTypes = "typeParameter"
+	SemanticTokenParameter     SemanticTokenTypes = "parameter"
+	SemanticTokenVariable      SemanticTokenTypes = "variable"
+	SemanticTokenProperty      SemanticTokenTypes = "property"
+	SemanticTokenEnumMember    SemanticTokenTypes = "enumMember"
+	SemanticTokenEvent         SemanticTokenTypes = "event"
+	SemanticTokenFunction      SemanticTokenTypes = "function"
+	SemanticTokenMethod        SemanticTokenTypes = "method"
+	SemanticTokenMacro         SemanticTokenTypes = "macro"
+	SemanticTokenKeyword       SemanticTokenTypes = "keyword"
+	SemanticTokenModifier      SemanticTokenTypes = "modifier"
+	SemanticTokenComment       SemanticTokenTypes = "comment"
+	SemanticTokenString        SemanticTokenTypes = "string"
+	SemanticTokenNumber        SemanticTokenTypes = "number"
+	SemanticTokenRegexp        SemanticTokenTypes = "regexp"
+	SemanticTokenOperator      SemanticTokenTypes = "operator"
+)
+
+// SemanticTokenModifiers represents a modifier of a semantic token.
+//
+// @since 3.16.0.
+type SemanticTokenModifiers string
+
+// list of SemanticTokenModifiers.
+const (
+	SemanticTokenModifierDeclaration    SemanticTokenModifiers = "declaration"
+	SemanticTokenModifierDefinition     SemanticTokenModifiers = "definition"
+	SemanticTokenModifierReadonly       SemanticTokenModifiers = "readonly"
+	SemanticTokenModifierStatic         SemanticTokenModifiers = "static"
+	SemanticTokenModifierDeprecated     SemanticTokenModifiers = "deprecated"
+	SemanticTokenModifierAbstract       SemanticTokenModifiers = "abstract"
+	SemanticTokenModifierAsync          SemanticTokenModifiers = "async"
+	SemanticTokenModifierModification   SemanticTokenModifiers = "modification"
+	SemanticTokenModifierDocumentation  SemanticTokenModifiers = "documentation"
+	SemanticTokenModifierDefaultLibrary SemanticTokenModifiers = "defaultLibrary"
+)
+
+// TokenFormat is an additional token format capability to allow future extensions of the format.
+//
+// @since 3.16.0.
+type TokenFormat string
+
+// TokenFormatRelative describes tokens using relative positions.
+const TokenFormatRelative TokenFormat = "relative"
+
+// SemanticTokensLegend is the legend that maps numeric token encodings to types and modifiers:
+// on the capability level, types and modifiers are defined using strings.
+//
+// However the real encoding happens using numbers.
+//
+// The server therefore needs to let the client know which numbers it is using for which types and modifiers.
+//
+// @since 3.16.0.
+type SemanticTokensLegend struct {
+	// TokenTypes is the token types a server uses.
+	TokenTypes []SemanticTokenTypes `json:"tokenTypes"`
+
+	// TokenModifiers is the token modifiers a server uses.
+	TokenModifiers []SemanticTokenModifiers `json:"tokenModifiers"`
+}
+
+// SemanticTokensParams params for the SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokensParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// SemanticTokens is the result of SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokens struct {
+	// ResultID an optional result id. If provided and clients support delta updating
+	// the client will include the result id in the next semantic token request.
+	//
+	// A server can then instead of computing all semantic tokens again simply
+	// send a delta.
+	ResultID string `json:"resultId,omitempty"`
+
+	// Data is the actual tokens.
+	Data []uint32 `json:"data"`
+}
+
+// SemanticTokensPartialResult is the partial result of SemanticTokensFull request.
+//
+// @since 3.16.0.
+type SemanticTokensPartialResult struct {
+	// Data is the actual tokens.
+	Data []uint32 `json:"data"`
+}
+
+// SemanticTokensDeltaParams params for the SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDeltaParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// PreviousResultID is the result id of a previous response.
+	//
+	// The result Id can either point to a full response or a delta response depending on what was received last.
+	PreviousResultID string `json:"previousResultId"`
+}
+
+// SemanticTokensDelta result of SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDelta struct {
+	// ResultID is the result id.
+	//
+	// This field is readonly.
+	ResultID string `json:"resultId,omitempty"`
+
+	// Edits is the semantic token edits to transform a previous result into a new
+	// result.
+	Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// SemanticTokensDeltaPartialResult is the partial result of SemanticTokensFullDelta request.
+//
+// @since 3.16.0.
+type SemanticTokensDeltaPartialResult struct {
+	Edits []SemanticTokensEdit `json:"edits"`
+}
+
+// SemanticTokensEdit is the semantic token edit.
+//
+// @since 3.16.0.
+type SemanticTokensEdit struct {
+	// Start is the start offset of the edit.
+	Start uint32 `json:"start"`
+
+	// DeleteCount is the count of elements to remove.
+	DeleteCount uint32 `json:"deleteCount"`
+
+	// Data is the elements to insert.
+	Data []uint32 `json:"data,omitempty"`
+}
+
+// SemanticTokensRangeParams params for the SemanticTokensRange request.
+//
+// @since 3.16.0.
+type SemanticTokensRangeParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// TextDocument is the text document.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Range is the range the semantic tokens are requested for.
+	Range Range `json:"range"`
+}
diff --git a/vendor/go.lsp.dev/protocol/server.go b/vendor/go.lsp.dev/protocol/server.go
new file mode 100644
index 00000000000..6f96161ccb2
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/server.go
@@ -0,0 +1,1892 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+
+	"github.com/segmentio/encoding/json"
+	"go.uber.org/zap"
+
+	"go.lsp.dev/jsonrpc2"
+	"go.lsp.dev/pkg/xcontext"
+)
+
+// ServerDispatcher returns a Server that dispatches LSP requests across the
+// given jsonrpc2 connection.
+func ServerDispatcher(conn jsonrpc2.Conn, logger *zap.Logger) Server {
+	return &server{
+		Conn:   conn,
+		logger: logger,
+	}
+}
+
+// ServerHandler jsonrpc2.Handler of Language Server Protocol Server.
+//nolint:unparam
+func ServerHandler(server Server, handler jsonrpc2.Handler) jsonrpc2.Handler {
+	h := func(ctx context.Context, reply jsonrpc2.Replier, req jsonrpc2.Request) error {
+		if ctx.Err() != nil {
+			xctx := xcontext.Detach(ctx)
+
+			return reply(xctx, nil, ErrRequestCancelled)
+		}
+		handled, err := serverDispatch(ctx, server, reply, req)
+		if handled || err != nil {
+			return err
+		}
+
+		// TODO: This code is wrong, it ignores handler and assumes non standard
+		// request handles everything.
+		// Non standard request should just be a layered handler.
+		var params interface{}
+		if err := json.Unmarshal(req.Params(), &params); err != nil {
+			return replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Request(ctx, req.Method(), params)
+
+		return reply(ctx, resp, err)
+	}
+
+	return h
+}
+
+// serverDispatch implements jsonrpc2.Handler.
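+//
+// Every request branch below follows the same shape, shown here as an
+// editorial sketch (Hover picked arbitrarily): decode the params, call the
+// matching Server method, and reply.
+//
+//	var params HoverParams
+//	if err := dec.Decode(&params); err != nil {
+//		return true, replyParseError(ctx, reply, err)
+//	}
+//	resp, err := server.Hover(ctx, &params)
+//	return true, reply(ctx, resp, err)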
+//nolint:gocognit,funlen,gocyclo,cyclop
+func serverDispatch(ctx context.Context, server Server, reply jsonrpc2.Replier, req jsonrpc2.Request) (handled bool, err error) {
+	if ctx.Err() != nil {
+		return true, reply(ctx, nil, ErrRequestCancelled)
+	}
+
+	dec := json.NewDecoder(bytes.NewReader(req.Params()))
+	logger := LoggerFromContext(ctx)
+
+	switch req.Method() {
+	case MethodInitialize: // request
+		defer logger.Debug(MethodInitialize, zap.Error(err))
+
+		var params InitializeParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Initialize(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodInitialized: // notification
+		defer logger.Debug(MethodInitialized, zap.Error(err))
+
+		var params InitializedParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.Initialized(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodShutdown: // request
+		defer logger.Debug(MethodShutdown, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		err := server.Shutdown(ctx)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodExit: // notification
+		defer logger.Debug(MethodExit, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		err := server.Exit(ctx)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkDoneProgressCancel: // notification
+		defer logger.Debug(MethodWorkDoneProgressCancel, zap.Error(err))
+
+		var params WorkDoneProgressCancelParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.WorkDoneProgressCancel(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodLogTrace: // notification
+		defer logger.Debug(MethodLogTrace, zap.Error(err))
+
+		var params LogTraceParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.LogTrace(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodSetTrace: // notification
+		defer logger.Debug(MethodSetTrace, zap.Error(err))
+
+		var params SetTraceParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.SetTrace(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentCodeAction: // request
+		defer logger.Debug(MethodTextDocumentCodeAction, zap.Error(err))
+
+		var params CodeActionParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.CodeAction(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentCodeLens: // request
+		defer logger.Debug(MethodTextDocumentCodeLens, zap.Error(err))
+
+		var params CodeLensParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.CodeLens(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodCodeLensResolve: // request
+		defer logger.Debug(MethodCodeLensResolve, zap.Error(err))
+
+		var params CodeLens
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.CodeLensResolve(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentColorPresentation: // request
+		defer logger.Debug(MethodTextDocumentColorPresentation, zap.Error(err))
+
+		var params ColorPresentationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.ColorPresentation(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentCompletion: // request
+		defer logger.Debug(MethodTextDocumentCompletion, zap.Error(err))
+
+		var params CompletionParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Completion(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodCompletionItemResolve: // request
+		defer logger.Debug(MethodCompletionItemResolve, zap.Error(err))
+
+		var params CompletionItem
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.CompletionResolve(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDeclaration: // request
+		defer logger.Debug(MethodTextDocumentDeclaration, zap.Error(err))
+
+		var params DeclarationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Declaration(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDefinition: // request
+		defer logger.Debug(MethodTextDocumentDefinition, zap.Error(err))
+
+		var params DefinitionParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Definition(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDidChange: // notification
+		defer logger.Debug(MethodTextDocumentDidChange, zap.Error(err))
+
+		var params DidChangeTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidChange(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkspaceDidChangeConfiguration: // notification
+		defer logger.Debug(MethodWorkspaceDidChangeConfiguration, zap.Error(err))
+
+		var params DidChangeConfigurationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidChangeConfiguration(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkspaceDidChangeWatchedFiles: // notification
+		defer logger.Debug(MethodWorkspaceDidChangeWatchedFiles, zap.Error(err))
+
+		var params DidChangeWatchedFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidChangeWatchedFiles(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWorkspaceDidChangeWorkspaceFolders: // notification
+		defer logger.Debug(MethodWorkspaceDidChangeWorkspaceFolders, zap.Error(err))
+
+		var params DidChangeWorkspaceFoldersParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidChangeWorkspaceFolders(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentDidClose: // notification
+		defer logger.Debug(MethodTextDocumentDidClose, zap.Error(err))
+
+		var params DidCloseTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidClose(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentDidOpen: // notification
+		defer logger.Debug(MethodTextDocumentDidOpen, zap.Error(err))
+
+		var params DidOpenTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidOpen(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentDidSave: // notification
+		defer logger.Debug(MethodTextDocumentDidSave, zap.Error(err))
+
+		var params DidSaveTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidSave(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentDocumentColor: // request
+		defer logger.Debug(MethodTextDocumentDocumentColor, zap.Error(err))
+
+		var params DocumentColorParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.DocumentColor(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDocumentHighlight: // request
+		defer logger.Debug(MethodTextDocumentDocumentHighlight, zap.Error(err))
+
+		var params DocumentHighlightParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.DocumentHighlight(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDocumentLink: // request
+		defer logger.Debug(MethodTextDocumentDocumentLink, zap.Error(err))
+
+		var params DocumentLinkParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.DocumentLink(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodDocumentLinkResolve: // request
+		defer logger.Debug(MethodDocumentLinkResolve, zap.Error(err))
+
+		var params DocumentLink
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.DocumentLinkResolve(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentDocumentSymbol: // request
+		defer logger.Debug(MethodTextDocumentDocumentSymbol, zap.Error(err))
+
+		var params DocumentSymbolParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.DocumentSymbol(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceExecuteCommand: // request
+		defer logger.Debug(MethodWorkspaceExecuteCommand, zap.Error(err))
+
+		var params ExecuteCommandParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.ExecuteCommand(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentFoldingRange: // request
+		defer logger.Debug(MethodTextDocumentFoldingRange, zap.Error(err))
+
+		var params FoldingRangeParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.FoldingRanges(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentFormatting: // request
+		defer logger.Debug(MethodTextDocumentFormatting, zap.Error(err))
+
+		var params DocumentFormattingParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Formatting(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentHover: // request
+		defer logger.Debug(MethodTextDocumentHover, zap.Error(err))
+
+		var params HoverParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Hover(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentImplementation: // request
+		defer logger.Debug(MethodTextDocumentImplementation, zap.Error(err))
+
+		var params ImplementationParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Implementation(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentOnTypeFormatting: // request
+		defer logger.Debug(MethodTextDocumentOnTypeFormatting, zap.Error(err))
+
+		var params DocumentOnTypeFormattingParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.OnTypeFormatting(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentPrepareRename: // request
+		defer logger.Debug(MethodTextDocumentPrepareRename, zap.Error(err))
+
+		var params PrepareRenameParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.PrepareRename(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentRangeFormatting: // request
+		defer logger.Debug(MethodTextDocumentRangeFormatting, zap.Error(err))
+
+		var params DocumentRangeFormattingParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.RangeFormatting(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentReferences: // request
+		defer logger.Debug(MethodTextDocumentReferences, zap.Error(err))
+
+		var params ReferenceParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.References(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentRename: // request
+		defer logger.Debug(MethodTextDocumentRename, zap.Error(err))
+
+		var params RenameParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Rename(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentSignatureHelp: // request
+		defer logger.Debug(MethodTextDocumentSignatureHelp, zap.Error(err))
+
+		var params SignatureHelpParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.SignatureHelp(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWorkspaceSymbol: // request
+		defer logger.Debug(MethodWorkspaceSymbol, zap.Error(err))
+
+		var params WorkspaceSymbolParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Symbols(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentTypeDefinition: // request
+		defer logger.Debug(MethodTextDocumentTypeDefinition, zap.Error(err))
+
+		var params TypeDefinitionParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.TypeDefinition(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodTextDocumentWillSave: // notification
+		defer logger.Debug(MethodTextDocumentWillSave, zap.Error(err))
+
+		var params WillSaveTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.WillSave(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentWillSaveWaitUntil: // request
+		defer logger.Debug(MethodTextDocumentWillSaveWaitUntil, zap.Error(err))
+
+		var params WillSaveTextDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.WillSaveWaitUntil(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodShowDocument: // request
+		defer logger.Debug(MethodShowDocument, zap.Error(err))
+
+		var params ShowDocumentParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.ShowDocument(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodWillCreateFiles: // request
+		defer logger.Debug(MethodWillCreateFiles, zap.Error(err))
+
+		var params CreateFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.WillCreateFiles(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodDidCreateFiles: // notification
+		defer logger.Debug(MethodDidCreateFiles, zap.Error(err))
+
+		var params CreateFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidCreateFiles(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWillRenameFiles: // request
+		defer logger.Debug(MethodWillRenameFiles, zap.Error(err))
+
+		var params RenameFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.WillRenameFiles(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodDidRenameFiles: // notification
+		defer logger.Debug(MethodDidRenameFiles, zap.Error(err))
+
+		var params RenameFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidRenameFiles(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodWillDeleteFiles: // request
+		defer logger.Debug(MethodWillDeleteFiles, zap.Error(err))
+
+		var params DeleteFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.WillDeleteFiles(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodDidDeleteFiles: // notification
+		defer logger.Debug(MethodDidDeleteFiles, zap.Error(err))
+
+		var params DeleteFilesParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		err := server.DidDeleteFiles(ctx, &params)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodCodeLensRefresh: // request
+		defer logger.Debug(MethodCodeLensRefresh, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		err := server.CodeLensRefresh(ctx)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodTextDocumentPrepareCallHierarchy: // request
+		defer logger.Debug(MethodTextDocumentPrepareCallHierarchy, zap.Error(err))
+
+		var params CallHierarchyPrepareParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.PrepareCallHierarchy(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodCallHierarchyIncomingCalls: // request
+		defer logger.Debug(MethodCallHierarchyIncomingCalls, zap.Error(err))
+
+		var params CallHierarchyIncomingCallsParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.IncomingCalls(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodCallHierarchyOutgoingCalls: // request
+		defer logger.Debug(MethodCallHierarchyOutgoingCalls, zap.Error(err))
+
+		var params CallHierarchyOutgoingCallsParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.OutgoingCalls(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodSemanticTokensFull: // request
+		defer logger.Debug(MethodSemanticTokensFull, zap.Error(err))
+
+		var params SemanticTokensParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.SemanticTokensFull(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodSemanticTokensFullDelta: // request
+		defer logger.Debug(MethodSemanticTokensFullDelta, zap.Error(err))
+
+		var params SemanticTokensDeltaParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.SemanticTokensFullDelta(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodSemanticTokensRange: // request
+		defer logger.Debug(MethodSemanticTokensRange, zap.Error(err))
+
+		var params SemanticTokensRangeParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.SemanticTokensRange(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodSemanticTokensRefresh: // request
+		defer logger.Debug(MethodSemanticTokensRefresh, zap.Error(err))
+
+		if len(req.Params()) > 0 {
+			return true, reply(ctx, nil, fmt.Errorf("expected no params: %w", jsonrpc2.ErrInvalidParams))
+		}
+
+		err := server.SemanticTokensRefresh(ctx)
+
+		return true, reply(ctx, nil, err)
+
+	case MethodLinkedEditingRange: // request
+		defer logger.Debug(MethodLinkedEditingRange, zap.Error(err))
+
+		var params LinkedEditingRangeParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.LinkedEditingRange(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	case MethodMoniker: // request
+		defer logger.Debug(MethodMoniker, zap.Error(err))
+
+		var params MonikerParams
+		if err := dec.Decode(&params); err != nil {
+			return true, replyParseError(ctx, reply, err)
+		}
+
+		resp, err := server.Moniker(ctx, &params)
+
+		return true, reply(ctx, resp, err)
+
+	default:
+		return false, nil
+	}
+}
+
+// Server represents a Language Server Protocol server.
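+//
+// Implementations are typically thin wrappers around application state; a
+// partial editorial sketch (myServer is hypothetical, remaining methods
+// elided):
+//
+//	type myServer struct{ /* application state */ }
+//
+//	func (s *myServer) Initialize(ctx context.Context, params *InitializeParams) (*InitializeResult, error) {
+//		return &InitializeResult{}, nil
+//	}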
+type Server interface {
+	Initialize(ctx context.Context, params *InitializeParams) (result *InitializeResult, err error)
+	Initialized(ctx context.Context, params *InitializedParams) (err error)
+	Shutdown(ctx context.Context) (err error)
+	Exit(ctx context.Context) (err error)
+	WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) (err error)
+	LogTrace(ctx context.Context, params *LogTraceParams) (err error)
+	SetTrace(ctx context.Context, params *SetTraceParams) (err error)
+	CodeAction(ctx context.Context, params *CodeActionParams) (result []CodeAction, err error)
+	CodeLens(ctx context.Context, params *CodeLensParams) (result []CodeLens, err error)
+	CodeLensResolve(ctx context.Context, params *CodeLens) (result *CodeLens, err error)
+	ColorPresentation(ctx context.Context, params *ColorPresentationParams) (result []ColorPresentation, err error)
+	Completion(ctx context.Context, params *CompletionParams) (result *CompletionList, err error)
+	CompletionResolve(ctx context.Context, params *CompletionItem) (result *CompletionItem, err error)
+	Declaration(ctx context.Context, params *DeclarationParams) (result []Location /* Declaration | DeclarationLink[] | null */, err error)
+	Definition(ctx context.Context, params *DefinitionParams) (result []Location /* Definition | DefinitionLink[] | null */, err error)
+	DidChange(ctx context.Context, params *DidChangeTextDocumentParams) (err error)
+	DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) (err error)
+	DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) (err error)
+	DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) (err error)
+	DidClose(ctx context.Context, params *DidCloseTextDocumentParams) (err error)
+	DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) (err error)
+	DidSave(ctx context.Context, params *DidSaveTextDocumentParams) (err error)
+	DocumentColor(ctx context.Context, params *DocumentColorParams) (result []ColorInformation, err error)
+	DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) (result []DocumentHighlight, err error)
+	DocumentLink(ctx context.Context, params *DocumentLinkParams) (result []DocumentLink, err error)
+	DocumentLinkResolve(ctx context.Context, params *DocumentLink) (result *DocumentLink, err error)
+	DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) (result []interface{} /* []SymbolInformation | []DocumentSymbol */, err error)
+	ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (result interface{}, err error)
+	FoldingRanges(ctx context.Context, params *FoldingRangeParams) (result []FoldingRange, err error)
+	Formatting(ctx context.Context, params *DocumentFormattingParams) (result []TextEdit, err error)
+	Hover(ctx context.Context, params *HoverParams) (result *Hover, err error)
+	Implementation(ctx context.Context, params *ImplementationParams) (result []Location, err error)
+	OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) (result []TextEdit, err error)
+	PrepareRename(ctx context.Context, params *PrepareRenameParams) (result *Range, err error)
+	RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) (result []TextEdit, err error)
+	References(ctx context.Context, params *ReferenceParams) (result []Location, err error)
+	Rename(ctx context.Context, params *RenameParams) (result *WorkspaceEdit, err error)
+	SignatureHelp(ctx context.Context, params *SignatureHelpParams) (result *SignatureHelp, err error)
+	Symbols(ctx context.Context, params *WorkspaceSymbolParams) (result []SymbolInformation, err error)
+	TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (result []Location, err error)
+	WillSave(ctx context.Context, params *WillSaveTextDocumentParams) (err error)
+	WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) (result []TextEdit, err error)
+	ShowDocument(ctx context.Context, params *ShowDocumentParams) (result *ShowDocumentResult, err error)
+	WillCreateFiles(ctx context.Context, params *CreateFilesParams) (result *WorkspaceEdit, err error)
+	DidCreateFiles(ctx context.Context, params *CreateFilesParams) (err error)
+	WillRenameFiles(ctx context.Context, params *RenameFilesParams) (result *WorkspaceEdit, err error)
+	DidRenameFiles(ctx context.Context, params *RenameFilesParams) (err error)
+	WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (result *WorkspaceEdit, err error)
+	DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) (err error)
+	CodeLensRefresh(ctx context.Context) (err error)
+	PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) (result []CallHierarchyItem, err error)
+	IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) (result []CallHierarchyIncomingCall, err error)
+	OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) (result []CallHierarchyOutgoingCall, err error)
+	SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (result *SemanticTokens, err error)
+	SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (result interface{} /* SemanticTokens | SemanticTokensDelta */, err error)
+	SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (result *SemanticTokens, err error)
+	SemanticTokensRefresh(ctx context.Context) (err error)
+	LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (result *LinkedEditingRanges, err error)
+	Moniker(ctx context.Context, params *MonikerParams) (result []Moniker, err error)
+	Request(ctx context.Context, method string, params interface{}) (result interface{}, err error)
+}
+
+// list of server methods.
+const (
+	// MethodCancelRequest method name of "$/cancelRequest".
+	MethodCancelRequest = "$/cancelRequest"
+
+	// MethodInitialize method name of "initialize".
+	MethodInitialize = "initialize"
+
+	// MethodInitialized method name of "initialized".
+	MethodInitialized = "initialized"
+
+	// MethodShutdown method name of "shutdown".
+	MethodShutdown = "shutdown"
+
+	// MethodExit method name of "exit".
+	MethodExit = "exit"
+
+	// MethodWorkDoneProgressCancel method name of "window/workDoneProgress/cancel".
+	MethodWorkDoneProgressCancel = "window/workDoneProgress/cancel"
+
+	// MethodLogTrace method name of "$/logTrace".
+	MethodLogTrace = "$/logTrace"
+
+	// MethodSetTrace method name of "$/setTrace".
+	MethodSetTrace = "$/setTrace"
+
+	// MethodTextDocumentCodeAction method name of "textDocument/codeAction".
+	MethodTextDocumentCodeAction = "textDocument/codeAction"
+
+	// MethodTextDocumentCodeLens method name of "textDocument/codeLens".
+	MethodTextDocumentCodeLens = "textDocument/codeLens"
+
+	// MethodCodeLensResolve method name of "codeLens/resolve".
+	MethodCodeLensResolve = "codeLens/resolve"
+
+	// MethodTextDocumentColorPresentation method name of "textDocument/colorPresentation".
+	MethodTextDocumentColorPresentation = "textDocument/colorPresentation"
+
+	// MethodTextDocumentCompletion method name of "textDocument/completion".
+	MethodTextDocumentCompletion = "textDocument/completion"
+
+	// MethodCompletionItemResolve method name of "completionItem/resolve".
+	MethodCompletionItemResolve = "completionItem/resolve"
+
+	// MethodTextDocumentDeclaration method name of "textDocument/declaration".
+	MethodTextDocumentDeclaration = "textDocument/declaration"
+
+	// MethodTextDocumentDefinition method name of "textDocument/definition".
+	MethodTextDocumentDefinition = "textDocument/definition"
+
+	// MethodTextDocumentDidChange method name of "textDocument/didChange".
+	MethodTextDocumentDidChange = "textDocument/didChange"
+
+	// MethodWorkspaceDidChangeConfiguration method name of "workspace/didChangeConfiguration".
+	MethodWorkspaceDidChangeConfiguration = "workspace/didChangeConfiguration"
+
+	// MethodWorkspaceDidChangeWatchedFiles method name of "workspace/didChangeWatchedFiles".
+	MethodWorkspaceDidChangeWatchedFiles = "workspace/didChangeWatchedFiles"
+
+	// MethodWorkspaceDidChangeWorkspaceFolders method name of "workspace/didChangeWorkspaceFolders".
+	MethodWorkspaceDidChangeWorkspaceFolders = "workspace/didChangeWorkspaceFolders"
+
+	// MethodTextDocumentDidClose method name of "textDocument/didClose".
+	MethodTextDocumentDidClose = "textDocument/didClose"
+
+	// MethodTextDocumentDidOpen method name of "textDocument/didOpen".
+	MethodTextDocumentDidOpen = "textDocument/didOpen"
+
+	// MethodTextDocumentDidSave method name of "textDocument/didSave".
+	MethodTextDocumentDidSave = "textDocument/didSave"
+
+	// MethodTextDocumentDocumentColor method name of "textDocument/documentColor".
+	MethodTextDocumentDocumentColor = "textDocument/documentColor"
+
+	// MethodTextDocumentDocumentHighlight method name of "textDocument/documentHighlight".
+	MethodTextDocumentDocumentHighlight = "textDocument/documentHighlight"
+
+	// MethodTextDocumentDocumentLink method name of "textDocument/documentLink".
+	MethodTextDocumentDocumentLink = "textDocument/documentLink"
+
+	// MethodDocumentLinkResolve method name of "documentLink/resolve".
+	MethodDocumentLinkResolve = "documentLink/resolve"
+
+	// MethodTextDocumentDocumentSymbol method name of "textDocument/documentSymbol".
+	MethodTextDocumentDocumentSymbol = "textDocument/documentSymbol"
+
+	// MethodWorkspaceExecuteCommand method name of "workspace/executeCommand".
+	MethodWorkspaceExecuteCommand = "workspace/executeCommand"
+
+	// MethodTextDocumentFoldingRange method name of "textDocument/foldingRange".
+	MethodTextDocumentFoldingRange = "textDocument/foldingRange"
+
+	// MethodTextDocumentFormatting method name of "textDocument/formatting".
+	MethodTextDocumentFormatting = "textDocument/formatting"
+
+	// MethodTextDocumentHover method name of "textDocument/hover".
+	MethodTextDocumentHover = "textDocument/hover"
+
+	// MethodTextDocumentImplementation method name of "textDocument/implementation".
+	MethodTextDocumentImplementation = "textDocument/implementation"
+
+	// MethodTextDocumentOnTypeFormatting method name of "textDocument/onTypeFormatting".
+	MethodTextDocumentOnTypeFormatting = "textDocument/onTypeFormatting"
+
+	// MethodTextDocumentPrepareRename method name of "textDocument/prepareRename".
+	MethodTextDocumentPrepareRename = "textDocument/prepareRename"
+
+	// MethodTextDocumentRangeFormatting method name of "textDocument/rangeFormatting".
+	MethodTextDocumentRangeFormatting = "textDocument/rangeFormatting"
+
+	// MethodTextDocumentReferences method name of "textDocument/references".
+	MethodTextDocumentReferences = "textDocument/references"
+
+	// MethodTextDocumentRename method name of "textDocument/rename".
+	MethodTextDocumentRename = "textDocument/rename"
+
+	// MethodTextDocumentSignatureHelp method name of "textDocument/signatureHelp".
+	MethodTextDocumentSignatureHelp = "textDocument/signatureHelp"
+
+	// MethodWorkspaceSymbol method name of "workspace/symbol".
+	MethodWorkspaceSymbol = "workspace/symbol"
+
+	// MethodTextDocumentTypeDefinition method name of "textDocument/typeDefinition".
+	MethodTextDocumentTypeDefinition = "textDocument/typeDefinition"
+
+	// MethodTextDocumentWillSave method name of "textDocument/willSave".
+	MethodTextDocumentWillSave = "textDocument/willSave"
+
+	// MethodTextDocumentWillSaveWaitUntil method name of "textDocument/willSaveWaitUntil".
+	MethodTextDocumentWillSaveWaitUntil = "textDocument/willSaveWaitUntil"
+
+	// MethodShowDocument method name of "window/showDocument".
+	MethodShowDocument = "window/showDocument"
+
+	// MethodWillCreateFiles method name of "workspace/willCreateFiles".
+	MethodWillCreateFiles = "workspace/willCreateFiles"
+
+	// MethodDidCreateFiles method name of "workspace/didCreateFiles".
+	MethodDidCreateFiles = "workspace/didCreateFiles"
+
+	// MethodWillRenameFiles method name of "workspace/willRenameFiles".
+	MethodWillRenameFiles = "workspace/willRenameFiles"
+
+	// MethodDidRenameFiles method name of "workspace/didRenameFiles".
+	MethodDidRenameFiles = "workspace/didRenameFiles"
+
+	// MethodWillDeleteFiles method name of "workspace/willDeleteFiles".
+	MethodWillDeleteFiles = "workspace/willDeleteFiles"
+
+	// MethodDidDeleteFiles method name of "workspace/didDeleteFiles".
+	MethodDidDeleteFiles = "workspace/didDeleteFiles"
+
+	// MethodCodeLensRefresh method name of "workspace/codeLens/refresh".
+	MethodCodeLensRefresh = "workspace/codeLens/refresh"
+
+	// MethodTextDocumentPrepareCallHierarchy method name of "textDocument/prepareCallHierarchy".
+	MethodTextDocumentPrepareCallHierarchy = "textDocument/prepareCallHierarchy"
+
+	// MethodCallHierarchyIncomingCalls method name of "callHierarchy/incomingCalls".
+	MethodCallHierarchyIncomingCalls = "callHierarchy/incomingCalls"
+
+	// MethodCallHierarchyOutgoingCalls method name of "callHierarchy/outgoingCalls".
+	MethodCallHierarchyOutgoingCalls = "callHierarchy/outgoingCalls"
+
+	// MethodSemanticTokensFull method name of "textDocument/semanticTokens/full".
+	MethodSemanticTokensFull = "textDocument/semanticTokens/full"
+
+	// MethodSemanticTokensFullDelta method name of "textDocument/semanticTokens/full/delta".
+	MethodSemanticTokensFullDelta = "textDocument/semanticTokens/full/delta"
+
+	// MethodSemanticTokensRange method name of "textDocument/semanticTokens/range".
+	MethodSemanticTokensRange = "textDocument/semanticTokens/range"
+
+	// MethodSemanticTokensRefresh method name of "workspace/semanticTokens/refresh".
+	MethodSemanticTokensRefresh = "workspace/semanticTokens/refresh"
+
+	// MethodLinkedEditingRange method name of "textDocument/linkedEditingRange".
+	MethodLinkedEditingRange = "textDocument/linkedEditingRange"
+
+	// MethodMoniker method name of "textDocument/moniker".
+	MethodMoniker = "textDocument/moniker"
+)
+
+// server implements a Language Server Protocol server.
+type server struct {
+	jsonrpc2.Conn
+
+	logger *zap.Logger
+}
+
+var _ Server = (*server)(nil)
+
+// Initialize sends the request as the first request from the client to the server.
+//
+// If the server receives a request or notification before the initialize request it should act as follows:
+//
+// - For a request the response should be an error with code: -32002. The message can be picked by the server.
+// - Notifications should be dropped, except for the exit notification. This will allow the exit of a server without an initialize request.
+//
+// Until the server has responded to the initialize request with an InitializeResult, the client
+// must not send any additional requests or notifications to the server.
+// In addition the server is not allowed to send any requests or notifications to the client until
+// it has responded with an InitializeResult, with the exception that during the initialize request
+// the server is allowed to send the notifications window/showMessage, window/logMessage and telemetry/event
+// as well as the window/showMessageRequest request to the client.
+func (s *server) Initialize(ctx context.Context, params *InitializeParams) (_ *InitializeResult, err error) {
+	s.logger.Debug("call " + MethodInitialize)
+	defer s.logger.Debug("end "+MethodInitialize, zap.Error(err))
+
+	var result *InitializeResult
+	if err := Call(ctx, s.Conn, MethodInitialize, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Initialized sends the notification from the client to the server after the client received the result of the
+// initialize request but before the client is sending any other request or notification to the server.
+//
+// The server can use the initialized notification for example to dynamically register capabilities.
+// The initialized notification may only be sent once.
+func (s *server) Initialized(ctx context.Context, params *InitializedParams) (err error) {
+	s.logger.Debug("notify " + MethodInitialized)
+	defer s.logger.Debug("end "+MethodInitialized, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodInitialized, params)
+}
+
+// Shutdown sends the request from the client to the server.
+//
+// It asks the server to shut down, but to not exit (otherwise the response might not be delivered correctly to the client).
+// There is a separate exit notification that asks the server to exit.
+//
+// Clients must not send any notifications other than `exit` or requests to a server to which they have sent a shutdown request.
+// If a server receives requests after a shutdown request those requests should be errored with `InvalidRequest`.
+func (s *server) Shutdown(ctx context.Context) (err error) {
+	s.logger.Debug("call " + MethodShutdown)
+	defer s.logger.Debug("end "+MethodShutdown, zap.Error(err))
+
+	return Call(ctx, s.Conn, MethodShutdown, nil, nil)
+}
+
+// Exit a notification to ask the server to exit its process.
+//
+// The server should exit with success code 0 if the shutdown request has been received before; otherwise with error code 1.
+func (s *server) Exit(ctx context.Context) (err error) {
+	s.logger.Debug("notify " + MethodExit)
+	defer s.logger.Debug("end "+MethodExit, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodExit, nil)
+}
+
+// LogTrace a notification to log the trace of the server’s execution.
+//
+// The amount and content of these notifications depends on the current trace configuration.
+//
+// If trace is "off", the server should not send any logTrace notification. If trace is "message",
+// the server should not add the "verbose" field in the LogTraceParams.
+//
+// @since 3.16.0.
+func (s *server) LogTrace(ctx context.Context, params *LogTraceParams) (err error) {
+	s.logger.Debug("notify " + MethodLogTrace)
+	defer s.logger.Debug("end "+MethodLogTrace, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodLogTrace, params)
+}
+
+// SetTrace a notification that should be used by the client to modify the trace setting of the server.
+//
+// @since 3.16.0.
+func (s *server) SetTrace(ctx context.Context, params *SetTraceParams) (err error) {
+	s.logger.Debug("notify " + MethodSetTrace)
+	defer s.logger.Debug("end "+MethodSetTrace, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodSetTrace, params)
+}
+
+// WorkDoneProgressCancel sends the notification from the client to the server to cancel a progress initiated on the
+// server side using the "window/workDoneProgress/create".
+func (s *server) WorkDoneProgressCancel(ctx context.Context, params *WorkDoneProgressCancelParams) (err error) {
+	s.logger.Debug("call " + MethodWorkDoneProgressCancel)
+	defer s.logger.Debug("end "+MethodWorkDoneProgressCancel, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodWorkDoneProgressCancel, params)
+}
+
+// CodeAction sends the request from the client to the server to compute commands for a given text document and range.
+//
+// These commands are typically code fixes to either fix problems or to beautify/refactor code. The result of a `textDocument/codeAction`
+// request is an array of `Command` literals which are typically presented in the user interface.
+//
+// To ensure that a server is useful in many clients the commands specified in a code action should be handled by the
+// server and not by the client (see `workspace/executeCommand` and `ServerCapabilities.executeCommandProvider`).
+// If the client supports providing edits with a code action then that mode should be used.
+func (s *server) CodeAction(ctx context.Context, params *CodeActionParams) (result []CodeAction, err error) {
+	s.logger.Debug("call " + MethodTextDocumentCodeAction)
+	defer s.logger.Debug("end "+MethodTextDocumentCodeAction, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentCodeAction, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// CodeLens sends the request from the client to the server to compute code lenses for a given text document.
+func (s *server) CodeLens(ctx context.Context, params *CodeLensParams) (result []CodeLens, err error) {
+	s.logger.Debug("call " + MethodTextDocumentCodeLens)
+	defer s.logger.Debug("end "+MethodTextDocumentCodeLens, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentCodeLens, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// CodeLensResolve sends the request from the client to the server to resolve the command for a given code lens item.
+func (s *server) CodeLensResolve(ctx context.Context, params *CodeLens) (_ *CodeLens, err error) {
+	s.logger.Debug("call " + MethodCodeLensResolve)
+	defer s.logger.Debug("end "+MethodCodeLensResolve, zap.Error(err))
+
+	var result *CodeLens
+	if err := Call(ctx, s.Conn, MethodCodeLensResolve, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ColorPresentation sends the request from the client to the server to obtain a list of presentations for a color value at a given location.
+//
+// Clients can use the result to
+//
+// - modify a color reference.
+// - show in a color picker and let users pick one of the presentations.
+func (s *server) ColorPresentation(ctx context.Context, params *ColorPresentationParams) (result []ColorPresentation, err error) {
+	s.logger.Debug("call " + MethodTextDocumentColorPresentation)
+	defer s.logger.Debug("end "+MethodTextDocumentColorPresentation, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentColorPresentation, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Completion sends the request from the client to the server to compute completion items at a given cursor position.
+//
+// Completion items are presented in the IntelliSense user interface.
+// If computing full completion items is expensive, servers can additionally provide a handler for the completion item resolve request (‘completionItem/resolve’).
+//
+// This request is sent when a completion item is selected in the user interface.
+// A typical use case is for example: the ‘textDocument/completion’ request doesn’t fill in the documentation property
+// for returned completion items since it is expensive to compute. When the item is selected in the user interface then
+// a ‘completionItem/resolve’ request is sent with the selected completion item as a parameter.
+//
+// The returned completion item should have the documentation property filled in. The request can delay the computation of
+// the `detail` and `documentation` properties. However, properties that are needed for the initial sorting and filtering,
+// like `sortText`, `filterText`, `insertText`, and `textEdit` must be provided in the `textDocument/completion` response and must not be changed during resolve.
+func (s *server) Completion(ctx context.Context, params *CompletionParams) (_ *CompletionList, err error) {
+	s.logger.Debug("call " + MethodTextDocumentCompletion)
+	defer s.logger.Debug("end "+MethodTextDocumentCompletion, zap.Error(err))
+
+	var result *CompletionList
+	if err := Call(ctx, s.Conn, MethodTextDocumentCompletion, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// CompletionResolve sends the request from the client to the server to resolve additional information for a given completion item.
+func (s *server) CompletionResolve(ctx context.Context, params *CompletionItem) (_ *CompletionItem, err error) {
+	s.logger.Debug("call " + MethodCompletionItemResolve)
+	defer s.logger.Debug("end "+MethodCompletionItemResolve, zap.Error(err))
+
+	var result *CompletionItem
+	if err := Call(ctx, s.Conn, MethodCompletionItemResolve, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Declaration sends the request from the client to the server to resolve the declaration location of a symbol at a given text document position.
+//
+// The result type LocationLink[] got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.declaration.linkSupport`.
+//
+// @since 3.14.0.
+func (s *server) Declaration(ctx context.Context, params *DeclarationParams) (result []Location, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDeclaration)
+	defer s.logger.Debug("end "+MethodTextDocumentDeclaration, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDeclaration, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Definition sends the request from the client to the server to resolve the definition location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.definition.linkSupport`.
+//
+// @since 3.14.0.
+func (s *server) Definition(ctx context.Context, params *DefinitionParams) (result []Location, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDefinition)
+	defer s.logger.Debug("end "+MethodTextDocumentDefinition, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDefinition, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DidChange sends the notification from the client to the server to signal changes to a text document.
+//
+// In 2.0 the shape of the params has changed to include proper version numbers and language ids.
+func (s *server) DidChange(ctx context.Context, params *DidChangeTextDocumentParams) (err error) {
+	s.logger.Debug("notify " + MethodTextDocumentDidChange)
+	defer s.logger.Debug("end "+MethodTextDocumentDidChange, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodTextDocumentDidChange, params)
+}
+
+// DidChangeConfiguration sends the notification from the client to the server to signal the change of configuration settings.
+func (s *server) DidChangeConfiguration(ctx context.Context, params *DidChangeConfigurationParams) (err error) {
+	s.logger.Debug("call " + MethodWorkspaceDidChangeConfiguration)
+	defer s.logger.Debug("end "+MethodWorkspaceDidChangeConfiguration, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodWorkspaceDidChangeConfiguration, params)
+}
+
+// DidChangeWatchedFiles sends the notification from the client to the server when the client detects changes to files watched by the language client.
+//
+// It is recommended that servers register for these file events using the registration mechanism.
+// In former implementations clients pushed file events without the server actively asking for it.
+func (s *server) DidChangeWatchedFiles(ctx context.Context, params *DidChangeWatchedFilesParams) (err error) {
+	s.logger.Debug("call " + MethodWorkspaceDidChangeWatchedFiles)
+	defer s.logger.Debug("end "+MethodWorkspaceDidChangeWatchedFiles, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodWorkspaceDidChangeWatchedFiles, params)
+}
+
+// DidChangeWorkspaceFolders sends the notification from the client to the server to inform the server about workspace folder configuration changes.
+//
+// The notification is sent by default if both ServerCapabilities/workspace/workspaceFolders and ClientCapabilities/workspace/workspaceFolders are true;
+// or if the server has registered itself to receive this notification.
+// To register for the workspace/didChangeWorkspaceFolders send a client/registerCapability request from the server to the client.
+//
+// The registration parameter must have a registrations item of the following form, where id is a unique id used to unregister the capability (the example uses a UUID).
+func (s *server) DidChangeWorkspaceFolders(ctx context.Context, params *DidChangeWorkspaceFoldersParams) (err error) {
+	s.logger.Debug("call " + MethodWorkspaceDidChangeWorkspaceFolders)
+	defer s.logger.Debug("end "+MethodWorkspaceDidChangeWorkspaceFolders, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodWorkspaceDidChangeWorkspaceFolders, params)
+}
+
+// DidClose sends the notification from the client to the server when the document got closed in the client.
+//
+// The document’s truth now exists where the document’s Uri points to (e.g. if the document’s Uri is a file Uri the truth now exists on disk).
+// As with the open notification the close notification is about managing the document’s content.
+// Receiving a close notification doesn’t mean that the document was open in an editor before.
+//
+// A close notification requires a previous open notification to be sent.
+// Note that a server’s ability to fulfill requests is independent of whether a text document is open or closed.
+func (s *server) DidClose(ctx context.Context, params *DidCloseTextDocumentParams) (err error) {
+	s.logger.Debug("call " + MethodTextDocumentDidClose)
+	defer s.logger.Debug("end "+MethodTextDocumentDidClose, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodTextDocumentDidClose, params)
+}
+
+// DidOpen sends the open notification from the client to the server to signal newly opened text documents.
+//
+// The document’s truth is now managed by the client and the server must not try to read the document’s truth using the document’s Uri.
+// Open in this sense means it is managed by the client. It doesn’t necessarily mean that its content is presented in an editor.
+//
+// An open notification must not be sent more than once without a corresponding close notification sent before.
+// This means open and close notifications must be balanced and the max open count for a particular textDocument is one.
+// Note that a server’s ability to fulfill requests is independent of whether a text document is open or closed.
+func (s *server) DidOpen(ctx context.Context, params *DidOpenTextDocumentParams) (err error) {
+	s.logger.Debug("call " + MethodTextDocumentDidOpen)
+	defer s.logger.Debug("end "+MethodTextDocumentDidOpen, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodTextDocumentDidOpen, params)
+}
+
+// DidSave sends the notification from the client to the server when the document was saved in the client.
+func (s *server) DidSave(ctx context.Context, params *DidSaveTextDocumentParams) (err error) {
+	s.logger.Debug("call " + MethodTextDocumentDidSave)
+	defer s.logger.Debug("end "+MethodTextDocumentDidSave, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodTextDocumentDidSave, params)
+}
+
+// DocumentColor sends the request from the client to the server to list all color references found in a given text document.
+//
+// Along with the range, a color value in RGB is returned.
+//
+// Clients can use the result to decorate color references in an editor.
+// For example:
+//
+// - Color boxes showing the actual color next to the reference
+// - Show a color picker when a color reference is edited.
+func (s *server) DocumentColor(ctx context.Context, params *DocumentColorParams) (result []ColorInformation, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDocumentColor)
+	defer s.logger.Debug("end "+MethodTextDocumentDocumentColor, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDocumentColor, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
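The wrapper methods in this file follow one fixed pattern that mirrors the LSP split between notifications and requests: notifications go through Conn.Notify and return only an error, while requests go through Call and also decode a result. A hedged sketch of a caller (illustrative only; openAndHover is a hypothetical helper, not part of this patch):

    // openAndHover first announces a document (notification: no result is
    // returned), then asks for hover information (request: result + error).
    func openAndHover(ctx context.Context, s Server, open *DidOpenTextDocumentParams, hover *HoverParams) (*Hover, error) {
    	if err := s.DidOpen(ctx, open); err != nil {
    		return nil, err
    	}
    	return s.Hover(ctx, hover)
    }

+// DocumentHighlight sends the request from the client to the server to resolve document highlights for a given text document position.
+//
+// For programming languages this usually highlights all references to the symbol scoped to this file.
+// However we kept ‘textDocument/documentHighlight’ and ‘textDocument/references’ separate requests since the first one is allowed to be more fuzzy.
+//
+// Symbol matches usually have a `DocumentHighlightKind` of `Read` or `Write` whereas fuzzy or textual matches use `Text` as the kind.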
+func (s *server) DocumentHighlight(ctx context.Context, params *DocumentHighlightParams) (result []DocumentHighlight, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDocumentHighlight)
+	defer s.logger.Debug("end "+MethodTextDocumentDocumentHighlight, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDocumentHighlight, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DocumentLink sends the request from the client to the server to request the location of links in a document.
+func (s *server) DocumentLink(ctx context.Context, params *DocumentLinkParams) (result []DocumentLink, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDocumentLink)
+	defer s.logger.Debug("end "+MethodTextDocumentDocumentLink, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDocumentLink, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DocumentLinkResolve sends the request from the client to the server to resolve the target of a given document link.
+func (s *server) DocumentLinkResolve(ctx context.Context, params *DocumentLink) (_ *DocumentLink, err error) {
+	s.logger.Debug("call " + MethodDocumentLinkResolve)
+	defer s.logger.Debug("end "+MethodDocumentLinkResolve, zap.Error(err))
+
+	var result *DocumentLink
+	if err := Call(ctx, s.Conn, MethodDocumentLinkResolve, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DocumentSymbol sends the request from the client to the server to return a flat list of all symbols found in a given text document.
+//
+// Neither the symbol’s location range nor the symbol’s container name should be used to infer a hierarchy.
+func (s *server) DocumentSymbol(ctx context.Context, params *DocumentSymbolParams) (result []interface{}, err error) {
+	s.logger.Debug("call " + MethodTextDocumentDocumentSymbol)
+	defer s.logger.Debug("end "+MethodTextDocumentDocumentSymbol, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentDocumentSymbol, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ExecuteCommand sends the request from the client to the server to trigger command execution on the server.
+//
+// In most cases the server creates a `WorkspaceEdit` structure and applies the changes to the workspace using the
+// request `workspace/applyEdit` which is sent from the server to the client.
+func (s *server) ExecuteCommand(ctx context.Context, params *ExecuteCommandParams) (result interface{}, err error) {
+	s.logger.Debug("call " + MethodWorkspaceExecuteCommand)
+	defer s.logger.Debug("end "+MethodWorkspaceExecuteCommand, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodWorkspaceExecuteCommand, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// FoldingRanges sends the request from the client to the server to return all folding ranges found in a given text document.
+//
+// @since version 3.10.0.
+func (s *server) FoldingRanges(ctx context.Context, params *FoldingRangeParams) (result []FoldingRange, err error) {
+	s.logger.Debug("call " + MethodTextDocumentFoldingRange)
+	defer s.logger.Debug("end "+MethodTextDocumentFoldingRange, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentFoldingRange, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Formatting sends the request from the client to the server to format a whole document.
+func (s *server) Formatting(ctx context.Context, params *DocumentFormattingParams) (result []TextEdit, err error) {
+	s.logger.Debug("call " + MethodTextDocumentFormatting)
+	defer s.logger.Debug("end "+MethodTextDocumentFormatting, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentFormatting, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Hover sends the request from the client to the server to request hover information at a given text document position.
+func (s *server) Hover(ctx context.Context, params *HoverParams) (_ *Hover, err error) {
+	s.logger.Debug("call " + MethodTextDocumentHover)
+	defer s.logger.Debug("end "+MethodTextDocumentHover, zap.Error(err))
+
+	var result *Hover
+	if err := Call(ctx, s.Conn, MethodTextDocumentHover, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Implementation sends the request from the client to the server to resolve the implementation location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.implementation.typeDefinition.linkSupport`.
+func (s *server) Implementation(ctx context.Context, params *ImplementationParams) (result []Location, err error) {
+	s.logger.Debug("call " + MethodTextDocumentImplementation)
+	defer s.logger.Debug("end "+MethodTextDocumentImplementation, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentImplementation, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// OnTypeFormatting sends the request from the client to the server to format parts of the document during typing.
+func (s *server) OnTypeFormatting(ctx context.Context, params *DocumentOnTypeFormattingParams) (result []TextEdit, err error) {
+	s.logger.Debug("call " + MethodTextDocumentOnTypeFormatting)
+	defer s.logger.Debug("end "+MethodTextDocumentOnTypeFormatting, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentOnTypeFormatting, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// PrepareRename sends the request from the client to the server to setup and test the validity of a rename operation at a given location.
+//
+// @since version 3.12.0.
+func (s *server) PrepareRename(ctx context.Context, params *PrepareRenameParams) (result *Range, err error) {
+	s.logger.Debug("call " + MethodTextDocumentPrepareRename)
+	defer s.logger.Debug("end "+MethodTextDocumentPrepareRename, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentPrepareRename, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// RangeFormatting sends the request from the client to the server to format a given range in a document.
+func (s *server) RangeFormatting(ctx context.Context, params *DocumentRangeFormattingParams) (result []TextEdit, err error) {
+	s.logger.Debug("call " + MethodTextDocumentRangeFormatting)
+	defer s.logger.Debug("end "+MethodTextDocumentRangeFormatting, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentRangeFormatting, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// References sends the request from the client to the server to resolve project-wide references for the symbol denoted by the given text document position.
+func (s *server) References(ctx context.Context, params *ReferenceParams) (result []Location, err error) {
+	s.logger.Debug("call " + MethodTextDocumentReferences)
+	defer s.logger.Debug("end "+MethodTextDocumentReferences, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentReferences, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Rename sends the request from the client to the server to perform a workspace-wide rename of a symbol.
+func (s *server) Rename(ctx context.Context, params *RenameParams) (result *WorkspaceEdit, err error) {
+	s.logger.Debug("call " + MethodTextDocumentRename)
+	defer s.logger.Debug("end "+MethodTextDocumentRename, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentRename, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SignatureHelp sends the request from the client to the server to request signature information at a given cursor position.
+func (s *server) SignatureHelp(ctx context.Context, params *SignatureHelpParams) (_ *SignatureHelp, err error) {
+	s.logger.Debug("call " + MethodTextDocumentSignatureHelp)
+	defer s.logger.Debug("end "+MethodTextDocumentSignatureHelp, zap.Error(err))
+
+	var result *SignatureHelp
+	if err := Call(ctx, s.Conn, MethodTextDocumentSignatureHelp, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Symbols sends the request from the client to the server to list project-wide symbols matching the query string.
+func (s *server) Symbols(ctx context.Context, params *WorkspaceSymbolParams) (result []SymbolInformation, err error) {
+	s.logger.Debug("call " + MethodWorkspaceSymbol)
+	defer s.logger.Debug("end "+MethodWorkspaceSymbol, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodWorkspaceSymbol, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// TypeDefinition sends the request from the client to the server to resolve the type definition location of a symbol at a given text document position.
+//
+// The result type `[]LocationLink` got introduced with version 3.14.0 and depends on the corresponding client capability `clientCapabilities.textDocument.typeDefinition.linkSupport`.
+//
+// @since version 3.6.0.
+func (s *server) TypeDefinition(ctx context.Context, params *TypeDefinitionParams) (result []Location, err error) {
+	s.logger.Debug("call " + MethodTextDocumentTypeDefinition)
+	defer s.logger.Debug("end "+MethodTextDocumentTypeDefinition, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentTypeDefinition, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// WillSave sends the notification from the client to the server before the document is actually saved.
+func (s *server) WillSave(ctx context.Context, params *WillSaveTextDocumentParams) (err error) {
+	s.logger.Debug("call " + MethodTextDocumentWillSave)
+	defer s.logger.Debug("end "+MethodTextDocumentWillSave, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodTextDocumentWillSave, params)
+}
+
+// WillSaveWaitUntil sends the request from the client to the server before the document is actually saved.
+//
+// The request can return an array of TextEdits which will be applied to the text document before it is saved.
+// Please note that clients might drop results if computing the text edits took too long or if a server constantly fails on this request.
+// This is done to keep the save fast and reliable.
+func (s *server) WillSaveWaitUntil(ctx context.Context, params *WillSaveTextDocumentParams) (result []TextEdit, err error) {
+	s.logger.Debug("call " + MethodTextDocumentWillSaveWaitUntil)
+	defer s.logger.Debug("end "+MethodTextDocumentWillSaveWaitUntil, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentWillSaveWaitUntil, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// ShowDocument sends the request from a server to a client to ask the client to display a particular document in the user interface.
+//
+// @since 3.16.0.
+func (s *server) ShowDocument(ctx context.Context, params *ShowDocumentParams) (result *ShowDocumentResult, err error) {
+	s.logger.Debug("call " + MethodShowDocument)
+	defer s.logger.Debug("end "+MethodShowDocument, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodShowDocument, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// WillCreateFiles sends the will create files request from the client to the server before files are actually created, as long as the creation is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to the workspace before the files are created.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep creates fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillCreateFiles(ctx context.Context, params *CreateFilesParams) (result *WorkspaceEdit, err error) {
+	s.logger.Debug("call " + MethodWillCreateFiles)
+	defer s.logger.Debug("end "+MethodWillCreateFiles, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodWillCreateFiles, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DidCreateFiles sends the did create files notification from the client to the server when files were created from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidCreateFiles(ctx context.Context, params *CreateFilesParams) (err error) {
+	s.logger.Debug("call " + MethodDidCreateFiles)
+	defer s.logger.Debug("end "+MethodDidCreateFiles, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodDidCreateFiles, params)
+}
+
+// WillRenameFiles sends the will rename files request from the client to the server before files are actually renamed, as long as the rename is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to the workspace before the files are renamed.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep renames fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillRenameFiles(ctx context.Context, params *RenameFilesParams) (result *WorkspaceEdit, err error) {
+	s.logger.Debug("call " + MethodWillRenameFiles)
+	defer s.logger.Debug("end "+MethodWillRenameFiles, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodWillRenameFiles, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DidRenameFiles sends the did rename files notification from the client to the server when files were renamed from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidRenameFiles(ctx context.Context, params *RenameFilesParams) (err error) {
+	s.logger.Debug("call " + MethodDidRenameFiles)
+	defer s.logger.Debug("end "+MethodDidRenameFiles, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodDidRenameFiles, params)
+}
+
+// WillDeleteFiles sends the will delete files request from the client to the server before files are actually deleted, as long as the deletion is triggered from within the client.
+//
+// The request can return a WorkspaceEdit which will be applied to the workspace before the files are deleted.
+//
+// Please note that clients might drop results if computing the edit took too long or if a server constantly fails on this request. This is done to keep deletes fast and reliable.
+//
+// @since 3.16.0.
+func (s *server) WillDeleteFiles(ctx context.Context, params *DeleteFilesParams) (result *WorkspaceEdit, err error) {
+	s.logger.Debug("call " + MethodWillDeleteFiles)
+	defer s.logger.Debug("end "+MethodWillDeleteFiles, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodWillDeleteFiles, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// DidDeleteFiles sends the did delete files notification from the client to the server when files were deleted from within the client.
+//
+// @since 3.16.0.
+func (s *server) DidDeleteFiles(ctx context.Context, params *DeleteFilesParams) (err error) {
+	s.logger.Debug("call " + MethodDidDeleteFiles)
+	defer s.logger.Debug("end "+MethodDidDeleteFiles, zap.Error(err))
+
+	return s.Conn.Notify(ctx, MethodDidDeleteFiles, params)
+}
+
+// CodeLensRefresh is sent from the server to the client.
+//
+// Servers can use it to ask clients to refresh the code lenses currently shown in editors.
+// As a result the client should ask the server to recompute the code lenses for these editors.
+// This is useful if a server detects a configuration change which requires a re-calculation of all code lenses.
+//
+// Note that the client still has the freedom to delay the re-calculation of the code lenses if for example an editor is currently not visible.
+//
+// @since 3.16.0.
+func (s *server) CodeLensRefresh(ctx context.Context) (err error) {
+	s.logger.Debug("call " + MethodCodeLensRefresh)
+	defer s.logger.Debug("end "+MethodCodeLensRefresh, zap.Error(err))
+
+	return Call(ctx, s.Conn, MethodCodeLensRefresh, nil, nil)
+}
+
+// PrepareCallHierarchy is sent from the client to the server to return a call hierarchy for the language element of given text document positions.
+//
+// The call hierarchy requests are executed in two steps:
+//  1. first a call hierarchy item is resolved for the given text document position
+//  2. for a call hierarchy item the incoming or outgoing call hierarchy items are resolved.
+//
+// @since 3.16.0.
+func (s *server) PrepareCallHierarchy(ctx context.Context, params *CallHierarchyPrepareParams) (result []CallHierarchyItem, err error) {
+	s.logger.Debug("call " + MethodTextDocumentPrepareCallHierarchy)
+	defer s.logger.Debug("end "+MethodTextDocumentPrepareCallHierarchy, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodTextDocumentPrepareCallHierarchy, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// IncomingCalls is the request sent from the client to the server to resolve incoming calls for a given call hierarchy item.
+//
+// The request doesn’t define its own client and server capabilities. It is only issued if a server registers for the "textDocument/prepareCallHierarchy" request.
+//
+// @since 3.16.0.
+func (s *server) IncomingCalls(ctx context.Context, params *CallHierarchyIncomingCallsParams) (result []CallHierarchyIncomingCall, err error) {
+	s.logger.Debug("call " + MethodCallHierarchyIncomingCalls)
+	defer s.logger.Debug("end "+MethodCallHierarchyIncomingCalls, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodCallHierarchyIncomingCalls, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// OutgoingCalls is the request sent from the client to the server to resolve outgoing calls for a given call hierarchy item.
+//
+// The request doesn’t define its own client and server capabilities. It is only issued if a server registers for the "textDocument/prepareCallHierarchy" request.
+//
+// @since 3.16.0.
+func (s *server) OutgoingCalls(ctx context.Context, params *CallHierarchyOutgoingCallsParams) (result []CallHierarchyOutgoingCall, err error) {
+	s.logger.Debug("call " + MethodCallHierarchyOutgoingCalls)
+	defer s.logger.Debug("end "+MethodCallHierarchyOutgoingCalls, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodCallHierarchyOutgoingCalls, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensFull is the request sent from the client to the server to resolve semantic tokens for a given file.
+//
+// Semantic tokens are used to add additional color information to a file that depends on language specific symbol information.
+//
+// A semantic token request usually produces a large result. The protocol therefore supports encoding tokens with numbers.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensFull(ctx context.Context, params *SemanticTokensParams) (result *SemanticTokens, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensFull)
+	defer s.logger.Debug("end "+MethodSemanticTokensFull, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensFull, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensFullDelta is the request sent from the client to the server to resolve a semantic token delta for a given file.
+//
+// Semantic tokens are used to add additional color information to a file that depends on language specific symbol information.
+//
+// A semantic token request usually produces a large result. The protocol therefore supports encoding tokens with numbers.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensFullDelta(ctx context.Context, params *SemanticTokensDeltaParams) (result interface{}, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensFullDelta)
+	defer s.logger.Debug("end "+MethodSemanticTokensFullDelta, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensFullDelta, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensRange is the request sent from the client to the server to resolve semantic tokens for a given range of a file.
+//
+// When a user opens a file it can be beneficial to only compute the semantic tokens for the visible range (faster rendering of the tokens in the user interface).
+// If a server can compute these tokens faster than for the whole file it can provide a handler for the "textDocument/semanticTokens/range" request to handle this case specially.
+//
+// Please note that if a client also announces that it will send the "textDocument/semanticTokens/range" request, the server should implement this request as well to allow for flicker free scrolling and semantic coloring of a minimap.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensRange(ctx context.Context, params *SemanticTokensRangeParams) (result *SemanticTokens, err error) {
+	s.logger.Debug("call " + MethodSemanticTokensRange)
+	defer s.logger.Debug("end "+MethodSemanticTokensRange, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodSemanticTokensRange, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// SemanticTokensRefresh is sent from the server to the client. Servers can use it to ask clients to refresh the editors for which this server provides semantic tokens.
+//
+// As a result the client should ask the server to recompute the semantic tokens for these editors.
+// This is useful if a server detects a project wide configuration change which requires a re-calculation of all semantic tokens.
+//
+// Note that the client still has the freedom to delay the re-calculation of the semantic tokens if for example an editor is currently not visible.
+//
+// @since 3.16.0.
+func (s *server) SemanticTokensRefresh(ctx context.Context) (err error) {
+	s.logger.Debug("call " + MethodSemanticTokensRefresh)
+	defer s.logger.Debug("end "+MethodSemanticTokensRefresh, zap.Error(err))
+
+	return Call(ctx, s.Conn, MethodSemanticTokensRefresh, nil, nil)
+}
+
+// LinkedEditingRange is the linked editing request sent from the client to the server to return for a given position in a document the range of the symbol at the position and all ranges that have the same content.
+//
+// Optionally a word pattern can be returned to describe valid contents.
+//
+// A rename to one of the ranges can be applied to all other ranges if the new content is valid. If no result-specific word pattern is provided, the word pattern from the client’s language configuration is used.
+//
+// @since 3.16.0.
+func (s *server) LinkedEditingRange(ctx context.Context, params *LinkedEditingRangeParams) (result *LinkedEditingRanges, err error) {
+	s.logger.Debug("call " + MethodLinkedEditingRange)
+	defer s.logger.Debug("end "+MethodLinkedEditingRange, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodLinkedEditingRange, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// Moniker is the request sent from the client to the server to get the symbol monikers for a given text document position.
+//
+// An array of Moniker types is returned as response to indicate possible monikers at the given location.
+//
+// If no monikers can be calculated, an empty array or null should be returned.
+//
+// @since 3.16.0.
+func (s *server) Moniker(ctx context.Context, params *MonikerParams) (result []Moniker, err error) {
+	s.logger.Debug("call " + MethodMoniker)
+	defer s.logger.Debug("end "+MethodMoniker, zap.Error(err))
+
+	if err := Call(ctx, s.Conn, MethodMoniker, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
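Besides the typed wrappers, the client keeps a generic escape hatch for methods outside the specification, shown in Request below. A sketch of its use (illustrative only; the method name "custom/echo" is hypothetical and server-specific):

    // callExtension invokes a non-standard, server-specific method through
    // the generic Request escape hatch; the result arrives as untyped JSON.
    func callExtension(ctx context.Context, s Server) (interface{}, error) {
    	return s.Request(ctx, "custom/echo", map[string]string{"text": "ping"})
    }

+// Request sends a request from the client to the server that is non-compliant with the Language Server Protocol specifications.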
+func (s *server) Request(ctx context.Context, method string, params interface{}) (interface{}, error) {
+	s.logger.Debug("call " + method)
+	defer s.logger.Debug("end " + method)
+
+	var result interface{}
+	if err := Call(ctx, s.Conn, method, params, &result); err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
diff --git a/vendor/go.lsp.dev/protocol/text.go b/vendor/go.lsp.dev/protocol/text.go
new file mode 100644
index 00000000000..34aec1bb5bc
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/text.go
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+)
+
+// DidOpenTextDocumentParams params of DidOpenTextDocument notification.
+type DidOpenTextDocumentParams struct {
+	// TextDocument is the document that was opened.
+	TextDocument TextDocumentItem `json:"textDocument"`
+}
+
+// DidChangeTextDocumentParams params of DidChangeTextDocument notification.
+type DidChangeTextDocumentParams struct {
+	// TextDocument is the document that did change. The version number points
+	// to the version after all provided content changes have
+	// been applied.
+	TextDocument VersionedTextDocumentIdentifier `json:"textDocument"`
+
+	// ContentChanges is the actual content changes. The content changes describe single state changes
+	// to the document. So if there are two content changes c1 and c2 for a document
+	// in state S then c1 moves the document to S' and c2 to S''.
+	ContentChanges []TextDocumentContentChangeEvent `json:"contentChanges"` // []TextDocumentContentChangeEvent | text
+}
+
+// TextDocumentSaveReason represents reasons why a text document is saved.
+type TextDocumentSaveReason float64
+
+const (
+	// TextDocumentSaveReasonManual is a manually triggered save, e.g. by the user pressing save, by starting debugging,
+	// or by an API call.
+	TextDocumentSaveReasonManual TextDocumentSaveReason = 1
+
+	// TextDocumentSaveReasonAfterDelay is an automatic save after a delay.
+	TextDocumentSaveReasonAfterDelay TextDocumentSaveReason = 2
+
+	// TextDocumentSaveReasonFocusOut is a save when the editor lost focus.
+	TextDocumentSaveReasonFocusOut TextDocumentSaveReason = 3
+)
+
+// String implements fmt.Stringer.
+func (t TextDocumentSaveReason) String() string {
+	switch t {
+	case TextDocumentSaveReasonManual:
+		return "Manual"
+	case TextDocumentSaveReasonAfterDelay:
+		return "AfterDelay"
+	case TextDocumentSaveReasonFocusOut:
+		return "FocusOut"
+	default:
+		return strconv.FormatFloat(float64(t), 'f', -10, 64)
+	}
+}
+
+// TextDocumentChangeRegistrationOptions describe options to be used when registering for text document change events.
+type TextDocumentChangeRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// SyncKind how documents are synced to the server. See TextDocumentSyncKind.Full
+	// and TextDocumentSyncKind.Incremental.
+	SyncKind TextDocumentSyncKind `json:"syncKind"`
+}
+
+// WillSaveTextDocumentParams is the parameters sent in a will save text document notification.
+type WillSaveTextDocumentParams struct {
+	// TextDocument is the document that will be saved.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+
+	// Reason is the 'TextDocumentSaveReason'.
+	Reason TextDocumentSaveReason `json:"reason,omitempty"`
+}
+
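As a concrete illustration of the save-related types above, a short sketch building the JSON body of a will-save notification (illustrative only; willSavePayload is a hypothetical helper and assumes the payload is marshaled with encoding/json, as the wire format expects):

    // willSavePayload builds the JSON body of a textDocument/willSave
    // notification for a manual save of the given document.
    func willSavePayload(doc TextDocumentIdentifier) ([]byte, error) {
    	params := WillSaveTextDocumentParams{
    		TextDocument: doc,
    		Reason:       TextDocumentSaveReasonManual,
    	}
    	return json.Marshal(&params) // import "encoding/json"
    }

+// DidSaveTextDocumentParams params of DidSaveTextDocument notification.
+type DidSaveTextDocumentParams struct {
+	// Text is the optional content when saved. Depends on the includeText value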
+// DidSaveTextDocumentParams params of DidSaveTextDocument notification.
+type DidSaveTextDocumentParams struct {
+	// Text is the optional content of the document when saved. It depends on
+	// the includeText value when the save notification was requested.
+	Text string `json:"text,omitempty"`
+
+	// TextDocument is the document that was saved.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
+
+// TextDocumentContentChangeEvent an event describing a change to a text document. If range and rangeLength are omitted
+// the new text is considered to be the full content of the document.
+type TextDocumentContentChangeEvent struct {
+	// Range is the range of the document that changed.
+	Range Range `json:"range"`
+
+	// RangeLength is the length of the range that got replaced.
+	RangeLength uint32 `json:"rangeLength,omitempty"`
+
+	// Text is the new text of the document.
+	Text string `json:"text"`
+}
+
+// TextDocumentSaveRegistrationOptions TextDocumentSave Registration options.
+type TextDocumentSaveRegistrationOptions struct {
+	TextDocumentRegistrationOptions
+
+	// IncludeText indicates whether the client is supposed to include the content on save.
+	IncludeText bool `json:"includeText,omitempty"`
+}
+
+// DidCloseTextDocumentParams params of DidCloseTextDocument notification.
+type DidCloseTextDocumentParams struct {
+	// TextDocument the document that was closed.
+	TextDocument TextDocumentIdentifier `json:"textDocument"`
+}
diff --git a/vendor/go.lsp.dev/protocol/util.go b/vendor/go.lsp.dev/protocol/util.go
new file mode 100644
index 00000000000..4dc29c438ab
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/util.go
@@ -0,0 +1,9 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// NewVersion returns a pointer to the int32 value i.
+func NewVersion(i int32) *int32 {
+	return &i
+}
diff --git a/vendor/go.lsp.dev/protocol/version.go b/vendor/go.lsp.dev/protocol/version.go
new file mode 100644
index 00000000000..79a27f348fe
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/version.go
@@ -0,0 +1,7 @@
+// SPDX-FileCopyrightText: 2018 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+// Version is the version of the language-server-protocol specification being implemented.
+const Version = "3.15.3"
diff --git a/vendor/go.lsp.dev/protocol/window.go b/vendor/go.lsp.dev/protocol/window.go
new file mode 100644
index 00000000000..b6af6f43818
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/window.go
@@ -0,0 +1,111 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import "strconv"
+
+// ShowMessageParams params of ShowMessage notification.
+type ShowMessageParams struct {
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type.
+	Type MessageType `json:"type"`
+}
+
+// MessageType is the message type of ShowMessageParams.
+type MessageType float64
+
+const (
+	// MessageTypeError an error message.
+	MessageTypeError MessageType = 1
+	// MessageTypeWarning a warning message.
+	MessageTypeWarning MessageType = 2
+	// MessageTypeInfo an information message.
+	MessageTypeInfo MessageType = 3
+	// MessageTypeLog a log message.
+	MessageTypeLog MessageType = 4
+)
+
+// String implements fmt.Stringer.
+func (m MessageType) String() string {
+	switch m {
+	case MessageTypeError:
+		return "error"
+	case MessageTypeWarning:
+		return "warning"
+	case MessageTypeInfo:
+		return "info"
+	case MessageTypeLog:
+		return "log"
+	default:
+		return strconv.FormatFloat(float64(m), 'f', -10, 64)
+	}
+}
+
+// Enabled reports whether the level is enabled.
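+//
+// A hedged usage sketch. Note the comparison is numeric, so for the constants
+// above Enabled reports whether m is level or a higher-numbered (i.e. less
+// severe) type:
+//
+//	if params.Type.Enabled(MessageTypeInfo) {
+//		// params.Type is MessageTypeInfo or MessageTypeLog
+//	}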
+func (m MessageType) Enabled(level MessageType) bool {
+	return level > 0 && m >= level
+}
+
+// messageTypeMap map of MessageTypes.
+var messageTypeMap = map[string]MessageType{
+	"error":   MessageTypeError,
+	"warning": MessageTypeWarning,
+	"info":    MessageTypeInfo,
+	"log":     MessageTypeLog,
+}
+
+// ToMessageType converts level to the MessageType.
+func ToMessageType(level string) MessageType {
+	mt, ok := messageTypeMap[level]
+	if !ok {
+		return MessageType(0) // unknown
+	}
+
+	return mt
+}
+
+// ShowMessageRequestParams params of ShowMessage request.
+type ShowMessageRequestParams struct {
+	// Actions is the message action items to present.
+	Actions []MessageActionItem `json:"actions"`
+
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type. See {@link MessageType}.
+	Type MessageType `json:"type"`
+}
+
+// MessageActionItem item of ShowMessageRequestParams action.
+type MessageActionItem struct {
+	// Title a short title like 'Retry', 'Open Log' etc.
+	Title string `json:"title"`
+}
+
+// LogMessageParams params of LogMessage notification.
+type LogMessageParams struct {
+	// Message is the actual message.
+	Message string `json:"message"`
+
+	// Type is the message type. See {@link MessageType}.
+	Type MessageType `json:"type"`
+}
+
+// WorkDoneProgressCreateParams params of WorkDoneProgressCreate request.
+//
+// @since 3.15.0.
+type WorkDoneProgressCreateParams struct {
+	// Token is the token to be used to report progress.
+	Token ProgressToken `json:"token"`
+}
+
+// WorkDoneProgressCancelParams params of WorkDoneProgressCancel request.
+//
+// @since 3.15.0.
+type WorkDoneProgressCancelParams struct {
+	// Token is the token to be used to report progress.
+	Token ProgressToken `json:"token"`
+}
diff --git a/vendor/go.lsp.dev/protocol/workspace.go b/vendor/go.lsp.dev/protocol/workspace.go
new file mode 100644
index 00000000000..3d39cd76626
--- /dev/null
+++ b/vendor/go.lsp.dev/protocol/workspace.go
@@ -0,0 +1,213 @@
+// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
+// SPDX-License-Identifier: BSD-3-Clause
+
+package protocol
+
+import (
+	"strconv"
+
+	"go.lsp.dev/uri"
+)
+
+// WorkspaceFolder response of Workspace folders request.
+type WorkspaceFolder struct {
+	// URI is the associated URI for this workspace folder.
+	URI string `json:"uri"`
+
+	// Name is the name of the workspace folder. Used to refer to this
+	// workspace folder in the user interface.
+	Name string `json:"name"`
+}
+
+// DidChangeWorkspaceFoldersParams params of DidChangeWorkspaceFolders notification.
+type DidChangeWorkspaceFoldersParams struct {
+	// Event is the actual workspace folder change event.
+	Event WorkspaceFoldersChangeEvent `json:"event"`
+}
+
+// WorkspaceFoldersChangeEvent is the workspace folder change event.
+type WorkspaceFoldersChangeEvent struct {
+	// Added is the array of added workspace folders.
+	Added []WorkspaceFolder `json:"added"`
+
+	// Removed is the array of the removed workspace folders.
+	Removed []WorkspaceFolder `json:"removed"`
+}
+
+// DidChangeConfigurationParams params of DidChangeConfiguration notification.
+type DidChangeConfigurationParams struct {
+	// Settings is the actual changed settings.
+	Settings interface{} `json:"settings,omitempty"`
+}
+
+// ConfigurationParams params of Configuration request.
+type ConfigurationParams struct {
+	Items []ConfigurationItem `json:"items"`
+}
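+
+// A hedged sketch of a workspace/configuration request asking for two
+// sections; the section names and scope path are illustrative, not defined
+// by this package:
+//
+//	params := ConfigurationParams{
+//		Items: []ConfigurationItem{
+//			{ScopeURI: uri.File("/workspace/moduleA"), Section: "gopls"},
+//			{Section: "editor.formatOnSave"},
+//		},
+//	}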
+// ConfigurationItem a ConfigurationItem consists of the configuration section to ask for and an additional scope URI.
+// The configuration section asked for is defined by the server and doesn’t necessarily need to correspond to the configuration store used by the client.
+// So a server might ask for a configuration cpp.formatterOptions but the client stores the configuration in an XML store with a different layout.
+// It is up to the client to do the necessary conversion. If a scope URI is provided the client should return the setting scoped to the provided resource.
+// If the client for example uses EditorConfig to manage its settings the configuration should be returned for the passed resource URI. If the client can’t provide a configuration setting for a given scope then null needs to be present in the returned array.
+type ConfigurationItem struct {
+	// ScopeURI is the scope to get the configuration section for.
+	ScopeURI uri.URI `json:"scopeUri,omitempty"`
+
+	// Section is the configuration section asked for.
+	Section string `json:"section,omitempty"`
+}
+
+// DidChangeWatchedFilesParams params of DidChangeWatchedFiles notification.
+type DidChangeWatchedFilesParams struct {
+	// Changes is the actual file events.
+	Changes []*FileEvent `json:"changes,omitempty"`
+}
+
+// FileEvent an event describing a file change.
+type FileEvent struct {
+	// Type is the change type.
+	Type FileChangeType `json:"type"`
+
+	// URI is the file's URI.
+	URI uri.URI `json:"uri"`
+}
+
+// FileChangeType is the file event type.
+type FileChangeType float64
+
+const (
+	// FileChangeTypeCreated means the file got created.
+	FileChangeTypeCreated FileChangeType = 1
+	// FileChangeTypeChanged means the file got changed.
+	FileChangeTypeChanged FileChangeType = 2
+	// FileChangeTypeDeleted means the file got deleted.
+	FileChangeTypeDeleted FileChangeType = 3
+)
+
+// String implements fmt.Stringer.
+func (t FileChangeType) String() string {
+	switch t {
+	case FileChangeTypeCreated:
+		return "Created"
+	case FileChangeTypeChanged:
+		return "Changed"
+	case FileChangeTypeDeleted:
+		return "Deleted"
+	default:
+		return strconv.FormatFloat(float64(t), 'f', -10, 64)
+	}
+}
+
+// DidChangeWatchedFilesRegistrationOptions describe options to be used when registering for file system change events.
+type DidChangeWatchedFilesRegistrationOptions struct {
+	// Watchers is the watchers to register.
+	Watchers []FileSystemWatcher `json:"watchers"`
+}
+
+// FileSystemWatcher watchers of DidChangeWatchedFiles Registration options.
+type FileSystemWatcher struct {
+	// GlobPattern is the glob pattern to watch.
+	//
+	// Glob patterns can have the following syntax:
+	// - `*` to match one or more characters in a path segment
+	// - `?` to match on one character in a path segment
+	// - `**` to match any number of path segments, including none
+	// - `{}` to group conditions (e.g. `**/*.{ts,js}` matches all TypeScript and JavaScript files)
+	// - `[]` to declare a range of characters to match in a path segment (e.g., `example.[0-9]` to match on `example.0`, `example.1`, …)
+	// - `[!...]` to negate a range of characters to match in a path segment (e.g., `example.[!0-9]` to match on `example.a`, `example.b`, but not `example.0`)
+	GlobPattern string `json:"globPattern"`
+
+	// Kind is the kind of events of interest. If omitted it defaults
+	// to WatchKind.Create | WatchKind.Change | WatchKind.Delete
+	// which is 7.
+	Kind WatchKind `json:"kind,omitempty"`
+}
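+
+// A hedged registration sketch watching all Go files; the pattern is
+// illustrative. WatchKind values are additive, and since the Go type is a
+// float64 the combined value is written out (7 = Create + Change + Delete):
+//
+//	opts := DidChangeWatchedFilesRegistrationOptions{
+//		Watchers: []FileSystemWatcher{
+//			{GlobPattern: "**/*.go", Kind: WatchKind(7)},
+//		},
+//	}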
+// WatchKind is the kind of events a FileSystemWatcher is interested in.
+type WatchKind float64
+
+const (
+	// WatchKindCreate means interested in create events.
+	WatchKindCreate WatchKind = 1
+
+	// WatchKindChange means interested in change events.
+	WatchKindChange WatchKind = 2
+
+	// WatchKindDelete means interested in delete events.
+	WatchKindDelete WatchKind = 4
+)
+
+// String implements fmt.Stringer.
+func (k WatchKind) String() string {
+	switch k {
+	case WatchKindCreate:
+		return "Create"
+	case WatchKindChange:
+		return "Change"
+	case WatchKindDelete:
+		return "Delete"
+	default:
+		return strconv.FormatFloat(float64(k), 'f', -10, 64)
+	}
+}
+
+// WorkspaceSymbolParams is the parameters of a Workspace Symbol request.
+type WorkspaceSymbolParams struct {
+	WorkDoneProgressParams
+	PartialResultParams
+
+	// Query a query string to filter symbols by.
+	//
+	// Clients may send an empty string here to request all symbols.
+	Query string `json:"query"`
+}
+
+// ExecuteCommandParams params of the ExecuteCommand request.
+type ExecuteCommandParams struct {
+	WorkDoneProgressParams
+
+	// Command is the identifier of the actual command handler.
+	Command string `json:"command"`
+
+	// Arguments that the command should be invoked with.
+	Arguments []interface{} `json:"arguments,omitempty"`
+}
+
+// ExecuteCommandRegistrationOptions execute command registration options.
+type ExecuteCommandRegistrationOptions struct {
+	// Commands is the commands to be executed on the server.
+	Commands []string `json:"commands"`
+}
+
+// ApplyWorkspaceEditParams params of the ApplyWorkspaceEdit request.
+type ApplyWorkspaceEditParams struct {
+	// Label an optional label of the workspace edit. This label is
+	// presented in the user interface for example on an undo
+	// stack to undo the workspace edit.
+	Label string `json:"label,omitempty"`
+
+	// Edit is the edits to apply.
+	Edit WorkspaceEdit `json:"edit"`
+}
+
+// ApplyWorkspaceEditResponse response of the ApplyWorkspaceEdit request.
+type ApplyWorkspaceEditResponse struct {
+	// Applied indicates whether the edit was applied or not.
+	Applied bool `json:"applied"`
+
+	// FailureReason an optional textual description for why the edit was not applied.
+	// This may be used by the server for diagnostic logging or to provide
+	// a suitable error for a request that triggered the edit.
+	//
+	// @since 3.16.0.
+	FailureReason string `json:"failureReason,omitempty"`
+
+	// FailedChange depending on the client's failure handling strategy "failedChange"
+	// might contain the index of the change that failed. This property is
+	// only available if the client signals a "failureHandlingStrategy"
+	// in its client capabilities.
+	//
+	// @since 3.16.0.
+ FailedChange uint32 `json:"failedChange,omitempty"` +} diff --git a/vendor/go.lsp.dev/uri/.codecov.yml b/vendor/go.lsp.dev/uri/.codecov.yml new file mode 100644 index 00000000000..cb891cd5e0b --- /dev/null +++ b/vendor/go.lsp.dev/uri/.codecov.yml @@ -0,0 +1,24 @@ +coverage: + precision: 1 + round: down + range: "70...100" + + status: + project: + default: off + target: auto + threshold: 10% + if_not_found: success + if_ci_failed: error + patch: + default: off + only_pulls: true + target: 50% + threshold: 10% + changes: false + ignore: + - "vendor" + +comment: + behavior: default + require_changes: true diff --git a/vendor/go.lsp.dev/uri/.gitattributes b/vendor/go.lsp.dev/uri/.gitattributes new file mode 100644 index 00000000000..cae764e31f6 --- /dev/null +++ b/vendor/go.lsp.dev/uri/.gitattributes @@ -0,0 +1,11 @@ +# go.lsp.dev/uri project gitattributes file +# https://github.com/github/linguist#using-gitattributes +# https://github.com/github/linguist/blob/master/lib/linguist/languages.yml + +# To prevent CRLF breakages on Windows for fragile files, like testdata. +* -text + +docs/ linguist-documentation +*.pb.go linguist-generated +*_gen.go linguist-generated +*_string.go linguist-generated diff --git a/vendor/go.lsp.dev/uri/.gitignore b/vendor/go.lsp.dev/uri/.gitignore new file mode 100644 index 00000000000..04497949dff --- /dev/null +++ b/vendor/go.lsp.dev/uri/.gitignore @@ -0,0 +1,49 @@ +# go.lsp.dev/uri project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +# please do not open a pull request to add something created by your editor or tools + +# github/gitignore/Go.gitignore +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +vendor/ + +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +# cgo generated +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +# test generated +_testmain.go + +# profile +*.pprof + +# coverage +coverage.* diff --git a/vendor/go.lsp.dev/uri/.golangci.yml b/vendor/go.lsp.dev/uri/.golangci.yml new file mode 100644 index 00000000000..14d15c23517 --- /dev/null +++ b/vendor/go.lsp.dev/uri/.golangci.yml @@ -0,0 +1,151 @@ +run: + issues-exit-code: 1 + tests: true + skip-dirs: + - "vendor$" + skip-files: + - ".*\\.pb\\.go" + - ".*(.|_)gen\\.go" + modules-download-mode: vendor + +linters-settings: + dupl: + threshold: 400 + errcheck: + check-type-assertions: true + check-blank: true + # exclude: .errcheckignore + funlen: + lines: 80 + statements: 40 + goconst: + min-len: 3 + min-occurrences: 3 + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + settings: + hugeParam: + sizeThreshold: 80 + rangeExprCopy: + sizeThreshold: 512 + rangeValCopy: + sizeThreshold: 128 + gocyclo: + min-complexity: 10 + gofmt: + simplify: true + goimports: + local-prefixes: go.lsp.dev/uri + golint: + min-confidence: 0.3 + govet: + enable: + - asmdecl + - assign + - atomic + - atomicalign + - bools + - buildssa + - buildtag + - cgocall + - composite + - copylock + - ctrlflow + - deepequalerrors + - errorsas + - findcall + - httpresponse + - 
inspect + - loopclosure + - lostcancel + - nilfunc + - nilness + - pkgfact + - printf + - shift + - sortslice + - stdmethods + - structtag + - tests + - unmarshal + - unreachable + - unsafeptr + - unusedresult + disable: + - shadow + lll: + line-length: 180 + tab-width: 1 + maligned: + suggest-new: false + misspell: + locale: US + nakedret: + max-func-lines: 30 + prealloc: + simple: true + range-loops: true + for-loops: false + unparam: + algo: cha + check-exported: true + unused: + check-exported: false + +linters: + # disabled: + # - funlen + # - gochecknoglobals + # - gochecknoinits + # - gocyclo + # - godox + # - gomnd + # - maligned + # - megacheck + # - scopelint + # - wsl + disable-all: true + enable: + - bodyclose + - deadcode + - depguard + - dogsled + - dupl + - errcheck + - gocognit + - goconst + - gocritic + - gofmt + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - interfacer + - lll + - misspell + - nakedret + - prealloc + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - varcheck + - whitespace + +issues: + exclude-use-default: true + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true diff --git a/vendor/go.lsp.dev/uri/LICENSE b/vendor/go.lsp.dev/uri/LICENSE new file mode 100644 index 00000000000..e8748709cfb --- /dev/null +++ b/vendor/go.lsp.dev/uri/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, The Go Language Server Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/go.lsp.dev/uri/Makefile b/vendor/go.lsp.dev/uri/Makefile new file mode 100644 index 00000000000..f4aeab60dfb --- /dev/null +++ b/vendor/go.lsp.dev/uri/Makefile @@ -0,0 +1,15 @@ +# ---------------------------------------------------------------------------- +# global + +.DEFAULT_GOAL = test + +# ---------------------------------------------------------------------------- +# target + +# ---------------------------------------------------------------------------- +# include + +include hack/make/go.mk + +# ---------------------------------------------------------------------------- +# overlays diff --git a/vendor/go.lsp.dev/uri/README.md b/vendor/go.lsp.dev/uri/README.md new file mode 100644 index 00000000000..5d8909cc3b7 --- /dev/null +++ b/vendor/go.lsp.dev/uri/README.md @@ -0,0 +1,19 @@ +# uri + +[![CircleCI][circleci-badge]][circleci] [![pkg.go.dev][pkg.go.dev-badge]][pkg.go.dev] [![Go module][module-badge]][module] [![codecov.io][codecov-badge]][codecov] [![GA][ga-badge]][ga] + +Package uri is an implementation of the URI Uniform Resource Identifier(RFC3986) specification for Go. + + + +[circleci]: https://app.circleci.com/pipelines/github/go-language-server/uri +[pkg.go.dev]: https://pkg.go.dev/go.lsp.dev/uri +[module]: https://github.com/go-language-server/uri/releases/latest +[codecov]: https://codecov.io/gh/go-language-server/uri +[ga]: https://github.com/go-language-server/uri + +[circleci-badge]: https://img.shields.io/circleci/build/github/go-language-server/uri/master.svg?style=for-the-badge&label=CIRCLECI&logo=circleci +[pkg.go.dev-badge]: https://bit.ly/shields-io-pkg-go-dev +[module-badge]: https://img.shields.io/github/release/go-language-server/uri.svg?color=00add8&label=MODULE&style=for-the-badge&logoWidth=25&logo=data%3Aimage%2Fsvg%2Bxml%3Bbase64%2CPHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9Ijg1IDU1IDEyMCAxMjAiPjxwYXRoIGZpbGw9IiMwMEFERDgiIGQ9Ik00MC4yIDEwMS4xYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoMzUuN2MuNCAwIC41LjMuMy42bC0xLjcgMi42Yy0uMi4zLS43LjYtMSAuNmwtMzYuMi0uMXptLTE1LjEgOS4yYy0uNCAwLS41LS4yLS4zLS41bDIuMS0yLjdjLjItLjMuNy0uNSAxLjEtLjVoNDUuNmMuNCAwIC42LjMuNS42bC0uOCAyLjRjLS4xLjQtLjUuNi0uOS42bC00Ny4zLjF6bTI0LjIgOS4yYy0uNCAwLS41LS4zLS4zLS42bDEuNC0yLjVjLjItLjMuNi0uNiAxLS42aDIwYy40IDAgLjYuMy42LjdsLS4yIDIuNGMwIC40LS40LjctLjcuN2wtMjEuOC0uMXptMTAzLjgtMjAuMmMtNi4zIDEuNi0xMC42IDIuOC0xNi44IDQuNC0xLjUuNC0xLjYuNS0yLjktMS0xLjUtMS43LTIuNi0yLjgtNC43LTMuOC02LjMtMy4xLTEyLjQtMi4yLTE4LjEgMS41LTYuOCA0LjQtMTAuMyAxMC45LTEwLjIgMTkgLjEgOCA1LjYgMTQuNiAxMy41IDE1LjcgNi44LjkgMTIuNS0xLjUgMTctNi42LjktMS4xIDEuNy0yLjMgMi43LTMuN2gtMTkuM2MtMi4xIDAtMi42LTEuMy0xLjktMyAxLjMtMy4xIDMuNy04LjMgNS4xLTEwLjkuMy0uNiAxLTEuNiAyLjUtMS42aDM2LjRjLS4yIDIuNy0uMiA1LjQtLjYgOC4xLTEuMSA3LjItMy44IDEzLjgtOC4yIDE5LjYtNy4yIDkuNS0xNi42IDE1LjQtMjguNSAxNy05LjggMS4zLTE4LjktLjYtMjYuOS02LjYtNy40LTUuNi0xMS42LTEzLTEyLjctMjIuMi0xLjMtMTAuOSAxLjktMjAuNyA4LjUtMjkuMyA3LjEtOS4zIDE2LjUtMTUuMiAyOC0xNy4zIDkuNC0xLjcgMTguNC0uNiAyNi41IDQuOSA1LjMgMy41IDkuMSA4LjMgMTEuNiAxNC4xLjYuOS4yIDEuNC0xIDEuN3oiLz48cGF0aCBmaWxsPSIjMDBBREQ4IiBkPSJNMTg2LjIgMTU0LjZjLTkuMS0uMi0xNy40LTIuOC0yNC40LTguOC01LjktNS4xLTkuNi0xMS42LTEwLjgtMTkuMy0xLjgtMTEuMyAxLjMtMjEuMyA4LjEtMzAuMiA3LjMtOS42IDE2LjEtMTQuNiAyOC0xNi43IDEwLjItMS44IDE5LjgtLjggMjguNSA1LjEgNy45IDUuNCAxMi44IDEyLjcgMTQuMSAyMi4zIDEuNyAxMy41LTIuMiAyNC41LTExLjUgMzMuOS02LjYgNi43LTE0LjcgMTAuOS0yNCAxMi44LTIuNy41LTUuNC42LTggLjl6bTIzLjgtNDAuNGMtLjEtMS4zLS4xLTIuMy0uMy0zLjMtMS44LTkuOS0xMC45LTE1LjUtMjAuNC0xMy4zLTkuMyAyLjEtMTUuMyA4LTE3LjUgMTcuNC0xLjggNy44IDIg
MTUuNyA5LjIgMTguOSA1LjUgMi40IDExIDIuMSAxNi4zLS42IDcuOS00LjEgMTIuMi0xMC41IDEyLjctMTkuMXoiLz48L3N2Zz4= +[codecov-badge]: https://img.shields.io/codecov/c/github/go-language-server/uri/master?logo=codecov&style=for-the-badge +[ga-badge]: https://gh-ga-beacon.appspot.com/UA-89201129-1/go-language-server/uri?useReferer&pixel diff --git a/vendor/go.lsp.dev/uri/doc.go b/vendor/go.lsp.dev/uri/doc.go new file mode 100644 index 00000000000..3d9d72e0756 --- /dev/null +++ b/vendor/go.lsp.dev/uri/doc.go @@ -0,0 +1,6 @@ +// Copyright 2019 The Go Language Server Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uri is an implementation of the URI Uniform Resource Identifier(RFC3986) specification for Go. +package uri // import "go.lsp.dev/uri" diff --git a/vendor/go.lsp.dev/uri/uri.go b/vendor/go.lsp.dev/uri/uri.go new file mode 100644 index 00000000000..32fb3135875 --- /dev/null +++ b/vendor/go.lsp.dev/uri/uri.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Language Server Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uri + +import ( + "errors" + "fmt" + "net/url" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const ( + // FileScheme schema of filesystem path. + FileScheme = "file" + + // HTTPScheme schema of http. + HTTPScheme = "http" + + // HTTPSScheme schema of https. + HTTPSScheme = "https" +) + +const ( + hierPart = "://" +) + +// URI Uniform Resource Identifier (URI) https://tools.ietf.org/html/rfc3986. +// +// This class is a simple parser which creates the basic component parts +// (http://tools.ietf.org/html/rfc3986#section-3) with minimal validation +// and encoding. +// +// foo://example.com:8042/over/there?name=ferret#nose +// \_/ \______________/\_________/ \_________/ \__/ +// | | | | | +// scheme authority path query fragment +// | _____________________|__ +// / \ / \ +// urn:example:animal:ferret:nose +type URI string + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (u URI) Filename() string { + filename, err := filename(u) + if err != nil { + panic(err) + } + + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", fmt.Errorf("failed to parse request URI: %w", err) + } + + if u.Scheme != FileScheme { + return "", fmt.Errorf("only file URIs are supported, got %v", u.Scheme) + } + + if isWindowsDriveURI(u.Path) { + u.Path = u.Path[1:] + } + + return u.Path, nil +} + +// New parses and creates a new URI from s. +func New(s string) URI { + if u, err := url.PathUnescape(s); err == nil { + s = u + } + + if strings.HasPrefix(s, FileScheme+hierPart) { + return URI(s) + } + + return File(s) +} + +// File parses and creates a new filesystem URI from path. +func File(path string) URI { + const goRootPragma = "$GOROOT" + if len(path) >= len(goRootPragma) && strings.EqualFold(goRootPragma, path[:len(goRootPragma)]) { + path = runtime.GOROOT() + path[len(goRootPragma):] + } + + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + + if isWindowsDrivePath(path) { + path = "/" + path + } + + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: FileScheme, + Path: path, + } + + return URI(u.String()) +} + +// Parse parses and creates a new URI from s. 
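+//
+// A hedged example of the file-URI round trip (exact escaping details aside):
+//
+//	u, err := Parse("file:///path/to/main.go")
+//	if err != nil {
+//		// not a file, http, or https URI
+//	}
+//	name := u.Filename() // "/path/to/main.go" on Unix-like systems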
+func Parse(s string) (u URI, err error) { + us, err := url.Parse(s) + if err != nil { + return u, fmt.Errorf("url.Parse: %w", err) + } + + switch us.Scheme { + case FileScheme: + ut := url.URL{ + Scheme: FileScheme, + Path: us.Path, + RawPath: filepath.FromSlash(us.Path), + } + u = URI(ut.String()) + + case HTTPScheme, HTTPSScheme: + ut := url.URL{ + Scheme: us.Scheme, + Host: us.Host, + Path: us.Path, + RawQuery: us.Query().Encode(), + Fragment: us.Fragment, + } + u = URI(ut.String()) + + default: + return u, errors.New("unknown scheme") + } + + return +} + +// From returns the new URI from args. +func From(scheme, authority, path, query, fragment string) URI { + switch scheme { + case FileScheme: + u := url.URL{ + Scheme: FileScheme, + Path: path, + RawPath: filepath.FromSlash(path), + } + return URI(u.String()) + + case HTTPScheme, HTTPSScheme: + u := url.URL{ + Scheme: scheme, + Host: authority, + Path: path, + RawQuery: url.QueryEscape(query), + Fragment: fragment, + } + return URI(u.String()) + + default: + panic(fmt.Sprintf("unknown scheme: %s", scheme)) + } +} + +// isWindowsDrivePath returns true if the file path is of the form used by Windows. +// +// We check if the path begins with a drive letter, followed by a ":". +func isWindowsDrivePath(path string) bool { + if len(path) < 4 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see https://golang.org/issue/6027). We check if the URI path has +// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". +func isWindowsDriveURI(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go new file mode 100644 index 00000000000..aa02eeda680 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "flag" + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +// An Analyzer describes an analysis function and its options. +type Analyzer struct { + // The Name of the analyzer must be a valid Go identifier + // as it may appear in command-line flags, URLs, and so on. + Name string + + // Doc is the documentation for the analyzer. + // The part before the first "\n\n" is the title + // (no capital or period, max ~60 letters). + Doc string + + // URL holds an optional link to a web page with additional + // documentation for this analyzer. + URL string + + // Flags defines any flags accepted by the analyzer. + // The manner in which these flags are exposed to the user + // depends on the driver which runs the analyzer. + Flags flag.FlagSet + + // Run applies the analyzer to a package. + // It returns an error if the analyzer failed. + // + // On success, the Run function may return a result + // computed by the Analyzer; its type must match ResultType. + // The driver makes this result available as an input to + // another Analyzer that depends directly on this one (see + // Requires) when it analyzes the same package. 
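+	//
+	// A minimal Run might look like this (a hedged sketch; real analyzers
+	// typically inspect pass.Files or the results of Requires analyzers):
+	//
+	//	func run(pass *Pass) (interface{}, error) {
+	//		for _, f := range pass.Files {
+	//			_ = f // walk the file's AST here
+	//		}
+	//		return nil, nil
+	//	}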
+ // + // To pass analysis results between packages (and thus + // potentially between address spaces), use Facts, which are + // serializable. + Run func(*Pass) (interface{}, error) + + // RunDespiteErrors allows the driver to invoke + // the Run method of this analyzer even on a + // package that contains parse or type errors. + // The Pass.TypeErrors field may consequently be non-empty. + RunDespiteErrors bool + + // Requires is a set of analyzers that must run successfully + // before this one on a given package. This analyzer may inspect + // the outputs produced by each analyzer in Requires. + // The graph over analyzers implied by Requires edges must be acyclic. + // + // Requires establishes a "horizontal" dependency between + // analysis passes (different analyzers, same package). + Requires []*Analyzer + + // ResultType is the type of the optional result of the Run function. + ResultType reflect.Type + + // FactTypes indicates that this analyzer imports and exports + // Facts of the specified concrete types. + // An analyzer that uses facts may assume that its import + // dependencies have been similarly analyzed before it runs. + // Facts must be pointers. + // + // FactTypes establishes a "vertical" dependency between + // analysis passes (same analyzer, different packages). + FactTypes []Fact +} + +func (a *Analyzer) String() string { return a.Name } + +// A Pass provides information to the Run function that +// applies a specific analyzer to a single Go package. +// +// It forms the interface between the analysis logic and the driver +// program, and has both input and an output components. +// +// As in a compiler, one pass may depend on the result computed by another. +// +// The Run function should not call any of the Pass functions concurrently. +type Pass struct { + Analyzer *Analyzer // the identity of the current analyzer + + // syntax and type information + Fset *token.FileSet // file position information; Run may add new files + Files []*ast.File // the abstract syntax tree of each file + OtherFiles []string // names of non-Go files of this package + IgnoredFiles []string // names of ignored source files in this package + Pkg *types.Package // type information about the package + TypesInfo *types.Info // type information about the syntax trees + TypesSizes types.Sizes // function for computing sizes of types + TypeErrors []types.Error // type errors (only if Analyzer.RunDespiteErrors) + + Module *Module // the package's enclosing module (possibly nil in some drivers) + + // Report reports a Diagnostic, a finding about a specific location + // in the analyzed source code such as a potential mistake. + // It may be called by the Run function. + Report func(Diagnostic) + + // ResultOf provides the inputs to this analysis pass, which are + // the corresponding results of its prerequisite analyzers. + // The map keys are the elements of Analysis.Required, + // and the type of each corresponding value is the required + // analysis's ResultType. + ResultOf map[*Analyzer]interface{} + + // ReadFile returns the contents of the named file. + // + // The only valid file names are the elements of OtherFiles + // and IgnoredFiles, and names returned by + // Fset.File(f.FileStart).Name() for each f in Files. + // + // Analyzers must use this function (if provided) instead of + // accessing the file system directly. 
This allows a driver to + // provide a virtualized file tree (including, for example, + // unsaved editor buffers) and to track dependencies precisely + // to avoid unnecessary recomputation. + ReadFile func(filename string) ([]byte, error) + + // -- facts -- + + // ImportObjectFact retrieves a fact associated with obj. + // Given a value ptr of type *T, where *T satisfies Fact, + // ImportObjectFact copies the value to *ptr. + // + // ImportObjectFact panics if called after the pass is complete. + // ImportObjectFact is not concurrency-safe. + ImportObjectFact func(obj types.Object, fact Fact) bool + + // ImportPackageFact retrieves a fact associated with package pkg, + // which must be this package or one of its dependencies. + // See comments for ImportObjectFact. + ImportPackageFact func(pkg *types.Package, fact Fact) bool + + // ExportObjectFact associates a fact of type *T with the obj, + // replacing any previous fact of that type. + // + // ExportObjectFact panics if it is called after the pass is + // complete, or if obj does not belong to the package being analyzed. + // ExportObjectFact is not concurrency-safe. + ExportObjectFact func(obj types.Object, fact Fact) + + // ExportPackageFact associates a fact with the current package. + // See comments for ExportObjectFact. + ExportPackageFact func(fact Fact) + + // AllPackageFacts returns a new slice containing all package + // facts of the analysis's FactTypes in unspecified order. + AllPackageFacts func() []PackageFact + + // AllObjectFacts returns a new slice containing all object + // facts of the analysis's FactTypes in unspecified order. + AllObjectFacts func() []ObjectFact + + /* Further fields may be added in future. */ +} + +// PackageFact is a package together with an associated fact. +type PackageFact struct { + Package *types.Package + Fact Fact +} + +// ObjectFact is an object together with an associated fact. +type ObjectFact struct { + Object types.Object + Fact Fact +} + +// Reportf is a helper function that reports a Diagnostic using the +// specified position and formatted error message. +func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: pos, Message: msg}) +} + +// The Range interface provides a range. It's equivalent to and satisfied by +// ast.Node. +type Range interface { + Pos() token.Pos // position of first character belonging to the node + End() token.Pos // position of first character immediately after the node +} + +// ReportRangef is a helper function that reports a Diagnostic using the +// range provided. ast.Node values can be passed in as the range because +// they satisfy the Range interface. +func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg}) +} + +func (pass *Pass) String() string { + return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) +} + +// A Fact is an intermediate fact produced during analysis. +// +// Each fact is associated with a named declaration (a types.Object) or +// with a package as a whole. A single object or package may have +// multiple associated facts, but only one of any particular fact type. +// +// A Fact represents a predicate such as "never returns", but does not +// represent the subject of the predicate such as "function F" or "package P". 
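+//
+// For example, a hypothetical fact type recording that a function never
+// returns could be declared as (a sketch, not an API of this package):
+//
+//	type noReturn struct{} // the fact itself carries no data
+//
+//	func (*noReturn) AFact() {}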
+// +// Facts may be produced in one analysis pass and consumed by another +// analysis pass even if these are in different address spaces. +// If package P imports Q, all facts about Q produced during +// analysis of that package will be available during later analysis of P. +// Facts are analogous to type export data in a build system: +// just as export data enables separate compilation of several passes, +// facts enable "separate analysis". +// +// Each pass (a, p) starts with the set of facts produced by the +// same analyzer a applied to the packages directly imported by p. +// The analysis may add facts to the set, and they may be exported in turn. +// An analysis's Run function may retrieve facts by calling +// Pass.Import{Object,Package}Fact and update them using +// Pass.Export{Object,Package}Fact. +// +// A fact is logically private to its Analysis. To pass values +// between different analyzers, use the results mechanism; +// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf. +// +// A Fact type must be a pointer. +// Facts are encoded and decoded using encoding/gob. +// A Fact may implement the GobEncoder/GobDecoder interfaces +// to customize its encoding. Fact encoding should not fail. +// +// A Fact should not be modified once exported. +type Fact interface { + AFact() // dummy method to avoid type errors +} + +// A Module describes the module to which a package belongs. +type Module struct { + Path string // module path + Version string // module version ("" if unknown, such as for workspace modules) + GoVersion string // go version used in module (e.g. "go1.22.0") +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go new file mode 100644 index 00000000000..c1b2dd4fa1b --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go @@ -0,0 +1,695 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysistest provides utilities for testing analyzers. +package analysistest + +import ( + "bytes" + "fmt" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "testing" + "text/scanner" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/testenv" + "golang.org/x/tools/txtar" +) + +// WriteFiles is a helper function that creates a temporary directory +// and populates it with a GOPATH-style project using filemap (which +// maps file names to contents). On success it returns the name of the +// directory and a cleanup function to delete it. +func WriteFiles(filemap map[string]string) (dir string, cleanup func(), err error) { + gopath, err := os.MkdirTemp("", "analysistest") + if err != nil { + return "", nil, err + } + cleanup = func() { os.RemoveAll(gopath) } + + for name, content := range filemap { + filename := filepath.Join(gopath, "src", name) + os.MkdirAll(filepath.Dir(filename), 0777) // ignore error + if err := os.WriteFile(filename, []byte(content), 0666); err != nil { + cleanup() + return "", nil, err + } + } + return gopath, cleanup, nil +} + +// TestData returns the effective filename of +// the program's "testdata" directory. 
+// This function may be overridden by projects using +// an alternative build system (such as Blaze) that +// does not run a test in its package directory. +var TestData = func() string { + testdata, err := filepath.Abs("testdata") + if err != nil { + log.Fatal(err) + } + return testdata +} + +// Testing is an abstraction of a *testing.T. +type Testing interface { + Errorf(format string, args ...interface{}) +} + +// RunWithSuggestedFixes behaves like Run, but additionally verifies suggested fixes. +// It uses golden files placed alongside the source code under analysis: +// suggested fixes for code in example.go will be compared against example.go.golden. +// +// Golden files can be formatted in one of two ways: as plain Go source code, or as txtar archives. +// In the first case, all suggested fixes will be applied to the original source, which will then be compared against the golden file. +// In the second case, suggested fixes will be grouped by their messages, and each set of fixes will be applied and tested separately. +// Each section in the archive corresponds to a single message. +// +// A golden file using txtar may look like this: +// +// -- turn into single negation -- +// package pkg +// +// func fn(b1, b2 bool) { +// if !b1 { // want `negating a boolean twice` +// println() +// } +// } +// +// -- remove double negation -- +// package pkg +// +// func fn(b1, b2 bool) { +// if b1 { // want `negating a boolean twice` +// println() +// } +// } +// +// # Conflicts +// +// A single analysis pass may offer two or more suggested fixes that +// (1) conflict but are nonetheless logically composable, (e.g. +// because both update the import declaration), or (2) are +// fundamentally incompatible (e.g. alternative fixes to the same +// statement). +// +// It is up to the driver to decide how to apply such fixes. A +// sophisticated driver could attempt to resolve conflicts of the +// first kind, but this test driver simply reports the fact of the +// conflict with the expectation that the user will split their tests +// into nonconflicting parts. +// +// Conflicts of the second kind can be avoided by giving the +// alternative fixes different names (SuggestedFix.Message) and +// defining the .golden file as a multi-section txtar file with a +// named section for each alternative fix, as shown above. +// +// Analyzers that compute fixes from a textual diff of the +// before/after file contents (instead of directly from syntax tree +// positions) may produce fixes that, although logically +// non-conflicting, nonetheless conflict due to the particulars of the +// diff algorithm. In such cases it may suffice to introduce +// sufficient separation of the statements in the test input so that +// the computed diffs do not overlap. If that fails, break the test +// into smaller parts. +// +// TODO(adonovan): the behavior of RunWithSuggestedFixes as documented +// above is impractical for tests that report multiple diagnostics and +// offer multiple alternative fixes for the same diagnostic, and it is +// inconsistent with the interpretation of multiple diagnostics +// described at Diagnostic.SuggestedFixes. +// We need to rethink the analyzer testing API to better support such +// cases. In the meantime, users of RunWithSuggestedFixes testing +// analyzers that offer alternative fixes are advised to put each fix +// in a separate .go file in the testdata. +func RunWithSuggestedFixes(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { + r := Run(t, dir, a, patterns...) 
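+	// r holds one Result per analyzed package; each package's suggested
+	// fixes are verified against the corresponding .golden files below.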
+ + // If the immediate caller of RunWithSuggestedFixes is in + // x/tools, we apply stricter checks as required by gopls. + inTools := false + { + var pcs [1]uintptr + n := runtime.Callers(1, pcs[:]) + frames := runtime.CallersFrames(pcs[:n]) + fr, _ := frames.Next() + if fr.Func != nil && strings.HasPrefix(fr.Func.Name(), "golang.org/x/tools/") { + inTools = true + } + } + + // Process each result (package) separately, matching up the suggested + // fixes into a diff, which we will compare to the .golden file. We have + // to do this per-result in case a file appears in two packages, such as in + // packages with tests, where mypkg/a.go will appear in both mypkg and + // mypkg.test. In that case, the analyzer may suggest the same set of + // changes to a.go for each package. If we merge all the results, those + // changes get doubly applied, which will cause conflicts or mismatches. + // Validating the results separately means as long as the two analyses + // don't produce conflicting suggestions for a single file, everything + // should match up. + for _, act := range r { + // file -> message -> edits + fileEdits := make(map[*token.File]map[string][]diff.Edit) + fileContents := make(map[*token.File][]byte) + + // Validate edits, prepare the fileEdits map and read the file contents. + for _, diag := range act.Diagnostics { + for _, fix := range diag.SuggestedFixes { + + // Assert that lazy fixes have a Category (#65578, #65087). + if inTools && len(fix.TextEdits) == 0 && diag.Category == "" { + t.Errorf("missing Diagnostic.Category for SuggestedFix without TextEdits (gopls requires the category for the name of the fix command") + } + + for _, edit := range fix.TextEdits { + start, end := edit.Pos, edit.End + if !end.IsValid() { + end = start + } + // Validate the edit. + if start > end { + t.Errorf( + "diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)", + act.Pass.Analyzer.Name, start, end) + continue + } + file, endfile := act.Pass.Fset.File(start), act.Pass.Fset.File(end) + if file == nil || endfile == nil || file != endfile { + t.Errorf( + "diagnostic for analysis %v contains Suggested Fix with malformed spanning files %v and %v", + act.Pass.Analyzer.Name, file.Name(), endfile.Name()) + continue + } + if _, ok := fileContents[file]; !ok { + contents, err := os.ReadFile(file.Name()) + if err != nil { + t.Errorf("error reading %s: %v", file.Name(), err) + } + fileContents[file] = contents + } + if _, ok := fileEdits[file]; !ok { + fileEdits[file] = make(map[string][]diff.Edit) + } + fileEdits[file][fix.Message] = append(fileEdits[file][fix.Message], diff.Edit{ + Start: file.Offset(start), + End: file.Offset(end), + New: string(edit.NewText), + }) + } + } + } + + for file, fixes := range fileEdits { + // Get the original file contents. + orig, ok := fileContents[file] + if !ok { + t.Errorf("could not find file contents for %s", file.Name()) + continue + } + + // Get the golden file and read the contents. + ar, err := txtar.ParseFile(file.Name() + ".golden") + if err != nil { + t.Errorf("error reading %s.golden: %v", file.Name(), err) + continue + } + + if len(ar.Files) > 0 { + // one virtual file per kind of suggested fix + + if len(ar.Comment) != 0 { + // we allow either just the comment, or just virtual + // files, not both. it is not clear how "both" should + // behave. 
+ t.Errorf("%s.golden has leading comment; we don't know what to do with it", file.Name()) + continue + } + + for sf, edits := range fixes { + found := false + for _, vf := range ar.Files { + if vf.Name == sf { + found = true + // the file may contain multiple trailing + // newlines if the user places empty lines + // between files in the archive. normalize + // this to a single newline. + golden := append(bytes.TrimRight(vf.Data, "\n"), '\n') + + if err := applyDiffsAndCompare(orig, golden, edits, file.Name()); err != nil { + t.Errorf("%s", err) + } + break + } + } + if !found { + t.Errorf("no section for suggested fix %q in %s.golden", sf, file.Name()) + } + } + } else { + // all suggested fixes are represented by a single file + + var catchallEdits []diff.Edit + for _, edits := range fixes { + catchallEdits = append(catchallEdits, edits...) + } + + if err := applyDiffsAndCompare(orig, ar.Comment, catchallEdits, file.Name()); err != nil { + t.Errorf("%s", err) + } + } + } + } + return r +} + +// applyDiffsAndCompare applies edits to src and compares the results against +// golden after formatting both. fileName is use solely for error reporting. +func applyDiffsAndCompare(src, golden []byte, edits []diff.Edit, fileName string) error { + out, err := diff.ApplyBytes(src, edits) + if err != nil { + return fmt.Errorf("%s: error applying fixes: %v (see possible explanations at RunWithSuggestedFixes)", fileName, err) + } + wantRaw, err := format.Source(golden) + if err != nil { + return fmt.Errorf("%s.golden: error formatting golden file: %v\n%s", fileName, err, out) + } + want := string(wantRaw) + + formatted, err := format.Source(out) + if err != nil { + return fmt.Errorf("%s: error formatting resulting source: %v\n%s", fileName, err, out) + } + if got := string(formatted); got != want { + unified := diff.Unified(fileName+".golden", "actual", want, got) + return fmt.Errorf("suggested fixes failed for %s:\n%s", fileName, unified) + } + return nil +} + +// Run applies an analysis to the packages denoted by the "go list" patterns. +// +// It loads the packages from the specified +// directory using golang.org/x/tools/go/packages, runs the analysis on +// them, and checks that each analysis emits the expected diagnostics +// and facts specified by the contents of '// want ...' comments in the +// package's source files. It treats a comment of the form +// "//...// want..." or "/*...// want... */" as if it starts at 'want'. +// +// If the directory contains a go.mod file, Run treats it as the root of the +// Go module in which to work. Otherwise, Run treats it as the root of a +// GOPATH-style tree, with package contained in the src subdirectory. +// +// An expectation of a Diagnostic is specified by a string literal +// containing a regular expression that must match the diagnostic +// message. For example: +// +// fmt.Printf("%s", 1) // want `cannot provide int 1 to %s` +// +// An expectation of a Fact associated with an object is specified by +// 'name:"pattern"', where name is the name of the object, which must be +// declared on the same line as the comment, and pattern is a regular +// expression that must match the string representation of the fact, +// fmt.Sprint(fact). For example: +// +// func panicf(format string, args interface{}) { // want panicf:"printfWrapper" +// +// Package facts are specified by the name "package" and appear on +// line 1 of the first source file of the package. 
+// +// A single 'want' comment may contain a mixture of diagnostic and fact +// expectations, including multiple facts about the same object: +// +// // want "diag" "diag2" x:"fact1" x:"fact2" y:"fact3" +// +// Unexpected diagnostics and facts, and unmatched expectations, are +// reported as errors to the Testing. +// +// Run reports an error to the Testing if loading or analysis failed. +// Run also returns a Result for each package for which analysis was +// attempted, even if unsuccessful. It is safe for a test to ignore all +// the results, but a test may use it to perform additional checks. +func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { + if t, ok := t.(testing.TB); ok { + testenv.NeedsGoPackages(t) + } + + pkgs, err := loadPackages(a, dir, patterns...) + if err != nil { + t.Errorf("loading %s: %v", patterns, err) + return nil + } + + if err := analysis.Validate([]*analysis.Analyzer{a}); err != nil { + t.Errorf("Validate: %v", err) + return nil + } + + results := checker.TestAnalyzer(a, pkgs) + for _, result := range results { + if result.Err != nil { + t.Errorf("error analyzing %s: %v", result.Pass, result.Err) + } else { + check(t, dir, result.Pass, result.Diagnostics, result.Facts) + } + } + return results +} + +// A Result holds the result of applying an analyzer to a package. +type Result = checker.TestAnalyzerResult + +// loadPackages uses go/packages to load a specified packages (from source, with +// dependencies) from dir, which is the root of a GOPATH-style project tree. +// loadPackages returns an error if any package had an error, or the pattern +// matched no packages. +func loadPackages(a *analysis.Analyzer, dir string, patterns ...string) ([]*packages.Package, error) { + env := []string{"GOPATH=" + dir, "GO111MODULE=off", "GOWORK=off"} // GOPATH mode + + // Undocumented module mode. Will be replaced by something better. + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { + gowork := filepath.Join(dir, "go.work") + if _, err := os.Stat(gowork); err != nil { + gowork = "off" + } + + env = []string{"GO111MODULE=on", "GOPROXY=off", "GOWORK=" + gowork} // module mode + } + + // packages.Load loads the real standard library, not a minimal + // fake version, which would be more efficient, especially if we + // have many small tests that import, say, net/http. + // However there is no easy way to make go/packages to consume + // a list of packages we generate and then do the parsing and + // typechecking, though this feature seems to be a recurring need. + + mode := packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedImports | + packages.NeedTypes | packages.NeedTypesSizes | packages.NeedSyntax | packages.NeedTypesInfo | + packages.NeedDeps | packages.NeedModule + cfg := &packages.Config{ + Mode: mode, + Dir: dir, + Tests: true, + Env: append(os.Environ(), env...), + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + return nil, err + } + + // If any named package couldn't be loaded at all + // (e.g. the Name field is unset), fail fast. + for _, pkg := range pkgs { + if pkg.Name == "" { + return nil, fmt.Errorf("failed to load %q: Errors=%v", + pkg.PkgPath, pkg.Errors) + } + } + + // Do NOT print errors if the analyzer will continue running. + // It is incredibly confusing for tests to be printing to stderr + // willy-nilly instead of their test logs, especially when the + // errors are expected and are going to be fixed. 
+ if !a.RunDespiteErrors { + if packages.PrintErrors(pkgs) > 0 { + return nil, fmt.Errorf("there were package loading errors (and RunDespiteErrors is false)") + } + } + + if len(pkgs) == 0 { + return nil, fmt.Errorf("no packages matched %s", patterns) + } + return pkgs, nil +} + +// check inspects an analysis pass on which the analysis has already +// been run, and verifies that all reported diagnostics and facts match +// specified by the contents of "// want ..." comments in the package's +// source files, which must have been parsed with comments enabled. +func check(t Testing, gopath string, pass *analysis.Pass, diagnostics []analysis.Diagnostic, facts map[types.Object][]analysis.Fact) { + type key struct { + file string + line int + } + + want := make(map[key][]expectation) + + // processComment parses expectations out of comments. + processComment := func(filename string, linenum int, text string) { + text = strings.TrimSpace(text) + + // Any comment starting with "want" is treated + // as an expectation, even without following whitespace. + if rest := strings.TrimPrefix(text, "want"); rest != text { + lineDelta, expects, err := parseExpectations(rest) + if err != nil { + t.Errorf("%s:%d: in 'want' comment: %s", filename, linenum, err) + return + } + if expects != nil { + want[key{filename, linenum + lineDelta}] = expects + } + } + } + + // Extract 'want' comments from parsed Go files. + for _, f := range pass.Files { + for _, cgroup := range f.Comments { + for _, c := range cgroup.List { + + text := strings.TrimPrefix(c.Text, "//") + if text == c.Text { // not a //-comment. + text = strings.TrimPrefix(text, "/*") + text = strings.TrimSuffix(text, "*/") + } + + // Hack: treat a comment of the form "//...// want..." + // or "/*...// want... */ + // as if it starts at 'want'. + // This allows us to add comments on comments, + // as required when testing the buildtag analyzer. + if i := strings.Index(text, "// want"); i >= 0 { + text = text[i+len("// "):] + } + + // It's tempting to compute the filename + // once outside the loop, but it's + // incorrect because it can change due + // to //line directives. + posn := pass.Fset.Position(c.Pos()) + filename := sanitize(gopath, posn.Filename) + processComment(filename, posn.Line, text) + } + } + } + + // Extract 'want' comments from non-Go files. + // TODO(adonovan): we may need to handle //line directives. + for _, filename := range pass.OtherFiles { + data, err := os.ReadFile(filename) + if err != nil { + t.Errorf("can't read '// want' comments from %s: %v", filename, err) + continue + } + filename := sanitize(gopath, filename) + linenum := 0 + for _, line := range strings.Split(string(data), "\n") { + linenum++ + + // Hack: treat a comment of the form "//...// want..." + // or "/*...// want... */ + // as if it starts at 'want'. + // This allows us to add comments on comments, + // as required when testing the buildtag analyzer. + if i := strings.Index(line, "// want"); i >= 0 { + line = line[i:] + } + + if i := strings.Index(line, "//"); i >= 0 { + line = line[i+len("//"):] + processComment(filename, linenum, line) + } + } + } + + checkMessage := func(posn token.Position, kind, name, message string) { + posn.Filename = sanitize(gopath, posn.Filename) + k := key{posn.Filename, posn.Line} + expects := want[k] + var unmatched []string + for i, exp := range expects { + if exp.kind == kind && exp.name == name { + if exp.rx.MatchString(message) { + // matched: remove the expectation. 
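+				// Swap-delete: overwrite the matched entry with the last one
+				// and shrink the slice; expectation order is not significant.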
+ expects[i] = expects[len(expects)-1] + expects = expects[:len(expects)-1] + want[k] = expects + return + } + unmatched = append(unmatched, fmt.Sprintf("%#q", exp.rx)) + } + } + if unmatched == nil { + t.Errorf("%v: unexpected %s: %v", posn, kind, message) + } else { + t.Errorf("%v: %s %q does not match pattern %s", + posn, kind, message, strings.Join(unmatched, " or ")) + } + } + + // Check the diagnostics match expectations. + for _, f := range diagnostics { + // TODO(matloob): Support ranges in analysistest. + posn := pass.Fset.Position(f.Pos) + checkMessage(posn, "diagnostic", "", f.Message) + } + + // Check the facts match expectations. + // Report errors in lexical order for determinism. + // (It's only deterministic within each file, not across files, + // because go/packages does not guarantee file.Pos is ascending + // across the files of a single compilation unit.) + var objects []types.Object + for obj := range facts { + objects = append(objects, obj) + } + sort.Slice(objects, func(i, j int) bool { + // Package facts compare less than object facts. + ip, jp := objects[i] == nil, objects[j] == nil // whether i, j is a package fact + if ip != jp { + return ip && !jp + } + return objects[i].Pos() < objects[j].Pos() + }) + for _, obj := range objects { + var posn token.Position + var name string + if obj != nil { + // Object facts are reported on the declaring line. + name = obj.Name() + posn = pass.Fset.Position(obj.Pos()) + } else { + // Package facts are reported at the start of the file. + name = "package" + posn = pass.Fset.Position(pass.Files[0].Pos()) + posn.Line = 1 + } + + for _, fact := range facts[obj] { + checkMessage(posn, "fact", name, fmt.Sprint(fact)) + } + } + + // Reject surplus expectations. + // + // Sometimes an Analyzer reports two similar diagnostics on a + // line with only one expectation. The reader may be confused by + // the error message. + // TODO(adonovan): print a better error: + // "got 2 diagnostics here; each one needs its own expectation". + var surplus []string + for key, expects := range want { + for _, exp := range expects { + err := fmt.Sprintf("%s:%d: no %s was reported matching %#q", key.file, key.line, exp.kind, exp.rx) + surplus = append(surplus, err) + } + } + sort.Strings(surplus) + for _, err := range surplus { + t.Errorf("%s", err) + } +} + +type expectation struct { + kind string // either "fact" or "diagnostic" + name string // name of object to which fact belongs, or "package" ("fact" only) + rx *regexp.Regexp +} + +func (ex expectation) String() string { + return fmt.Sprintf("%s %s:%q", ex.kind, ex.name, ex.rx) // for debugging +} + +// parseExpectations parses the content of a "// want ..." comment +// and returns the expectations, a mixture of diagnostics ("rx") and +// facts (name:"rx"). +func parseExpectations(text string) (lineDelta int, expects []expectation, err error) { + var scanErr string + sc := new(scanner.Scanner).Init(strings.NewReader(text)) + sc.Error = func(s *scanner.Scanner, msg string) { + scanErr = msg // e.g. 
bad string escape + } + sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanRawStrings | scanner.ScanInts + + scanRegexp := func(tok rune) (*regexp.Regexp, error) { + if tok != scanner.String && tok != scanner.RawString { + return nil, fmt.Errorf("got %s, want regular expression", + scanner.TokenString(tok)) + } + pattern, _ := strconv.Unquote(sc.TokenText()) // can't fail + return regexp.Compile(pattern) + } + + for { + tok := sc.Scan() + switch tok { + case '+': + tok = sc.Scan() + if tok != scanner.Int { + return 0, nil, fmt.Errorf("got +%s, want +Int", scanner.TokenString(tok)) + } + lineDelta, _ = strconv.Atoi(sc.TokenText()) + case scanner.String, scanner.RawString: + rx, err := scanRegexp(tok) + if err != nil { + return 0, nil, err + } + expects = append(expects, expectation{"diagnostic", "", rx}) + + case scanner.Ident: + name := sc.TokenText() + tok = sc.Scan() + if tok != ':' { + return 0, nil, fmt.Errorf("got %s after %s, want ':'", + scanner.TokenString(tok), name) + } + tok = sc.Scan() + rx, err := scanRegexp(tok) + if err != nil { + return 0, nil, err + } + expects = append(expects, expectation{"fact", name, rx}) + + case scanner.EOF: + if scanErr != "" { + return 0, nil, fmt.Errorf("%s", scanErr) + } + return lineDelta, expects, nil + + default: + return 0, nil, fmt.Errorf("unexpected %s", scanner.TokenString(tok)) + } + } +} + +// sanitize removes the GOPATH portion of the filename, +// typically a gnarly /tmp directory, and returns the rest. +func sanitize(gopath, filename string) string { + prefix := gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator) + return filepath.ToSlash(strings.TrimPrefix(filename, prefix)) +} diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go new file mode 100644 index 00000000000..ee083a2d686 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import "go/token" + +// A Diagnostic is a message associated with a source location or range. +// +// An Analyzer may return a variety of diagnostics; the optional Category, +// which should be a constant, may be used to classify them. +// It is primarily intended to make it easy to look up documentation. +// +// All Pos values are interpreted relative to Pass.Fset. If End is +// provided, the diagnostic is specified to apply to the range between +// Pos and End. +type Diagnostic struct { + Pos token.Pos + End token.Pos // optional + Category string // optional + Message string + + // URL is the optional location of a web page that provides + // additional documentation for this diagnostic. + // + // If URL is empty but a Category is specified, then the + // Analysis driver should treat the URL as "#"+Category. + // + // The URL may be relative. If so, the base URL is that of the + // Analyzer that produced the diagnostic; + // see https://pkg.go.dev/net/url#URL.ResolveReference. + URL string + + // SuggestedFixes is an optional list of fixes to address the + // problem described by the diagnostic. Each one represents + // an alternative strategy; at most one may be applied. + // + // Fixes for different diagnostics should be treated as + // independent changes to the same baseline file state, + // analogous to a set of git commits all with the same parent. 
+ // Combining fixes requires resolving any conflicts that + // arise, analogous to a git merge. + // Any conflicts that remain may be dealt with, depending on + // the tool, by discarding fixes, consulting the user, or + // aborting the operation. + SuggestedFixes []SuggestedFix + + // Related contains optional secondary positions and messages + // related to the primary diagnostic. + Related []RelatedInformation +} + +// RelatedInformation contains information related to a diagnostic. +// For example, a diagnostic that flags duplicated declarations of a +// variable may include one RelatedInformation per existing +// declaration. +type RelatedInformation struct { + Pos token.Pos + End token.Pos // optional + Message string +} + +// A SuggestedFix is a code change associated with a Diagnostic that a +// user can choose to apply to their code. Usually the SuggestedFix is +// meant to fix the issue flagged by the diagnostic. +// +// The TextEdits must not overlap, nor contain edits for other packages. +type SuggestedFix struct { + // A verb phrase describing the fix, to be shown to + // a user trying to decide whether to accept it. + // + // Example: "Remove the surplus argument" + Message string + TextEdits []TextEdit +} + +// A TextEdit represents the replacement of the code between Pos and End with the new text. +// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. +type TextEdit struct { + // For a pure insertion, End can either be set to Pos or token.NoPos. + Pos token.Pos + End token.Pos + NewText []byte +} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go new file mode 100644 index 00000000000..2a0aa577126 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package analysis defines the interface between a modular static +analysis and an analysis driver program. + +# Background + +A static analysis is a function that inspects a package of Go code and +reports a set of diagnostics (typically mistakes in the code), and +perhaps produces other results as well, such as suggested refactorings +or other facts. An analysis that reports mistakes is informally called a +"checker". For example, the printf checker reports mistakes in +fmt.Printf format strings. + +A "modular" analysis is one that inspects one package at a time but can +save information from a lower-level package and use it when inspecting a +higher-level package, analogous to separate compilation in a toolchain. +The printf checker is modular: when it discovers that a function such as +log.Fatalf delegates to fmt.Printf, it records this fact, and checks +calls to that function too, including calls made from another package. + +By implementing a common interface, checkers from a variety of sources +can be easily selected, incorporated, and reused in a wide range of +driver programs including command-line tools (such as vet), text editors and +IDEs, build and test systems (such as go build, Bazel, or Buck), test +frameworks, code review tools, code-base indexers (such as SourceGraph), +documentation viewers (such as godoc), batch pipelines for large code +bases, and so on. + +# Analyzer + +The primary type in the API is [Analyzer]. 
An Analyzer statically +describes an analysis function: its name, documentation, flags, +relationship to other analyzers, and of course, its logic. + +To define an analysis, a user declares a (logically constant) variable +of type Analyzer. Here is a typical example from one of the analyzers in +the go/analysis/passes/ subdirectory: + + package unusedresult + + var Analyzer = &analysis.Analyzer{ + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, + ... + } + + func run(pass *analysis.Pass) (interface{}, error) { + ... + } + +An analysis driver is a program such as vet that runs a set of +analyses and prints the diagnostics that they report. +The driver program must import the list of Analyzers it needs. +Typically each Analyzer resides in a separate package. +To add a new Analyzer to an existing driver, add another item to the list: + + import ( "unusedresult"; "nilness"; "printf" ) + + var analyses = []*analysis.Analyzer{ + unusedresult.Analyzer, + nilness.Analyzer, + printf.Analyzer, + } + +A driver may use the name, flags, and documentation to provide on-line +help that describes the analyses it performs. +The doc comment contains a brief one-line summary, +optionally followed by paragraphs of explanation. + +The [Analyzer] type has more fields besides those shown above: + + type Analyzer struct { + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact + } + +The Flags field declares a set of named (global) flag variables that +control analysis behavior. Unlike vet, analysis flags are not declared +directly in the command line FlagSet; it is up to the driver to set the +flag variables. A driver for a single analysis, a, might expose its flag +f directly on the command line as -f, whereas a driver for multiple +analyses might prefix the flag name by the analysis name (-a.f) to avoid +ambiguity. An IDE might expose the flags through a graphical interface, +and a batch pipeline might configure them from a config file. +See the "findcall" analyzer for an example of flags in action. + +The RunDespiteErrors flag indicates whether the analysis is equipped to +handle ill-typed code. If not, the driver will skip the analysis if +there were parse or type errors. +The optional ResultType field specifies the type of the result value +computed by this analysis and made available to other analyses. +The Requires field specifies a list of analyses upon which +this one depends and whose results it may access, and it constrains the +order in which a driver may run analyses. +The FactTypes field is discussed in the section on Modularity. +The analysis package provides a Validate function to perform basic +sanity checks on an Analyzer, such as that its Requires graph is +acyclic, its fact and result types are unique, and so on. + +Finally, the Run field contains a function to be called by the driver to +execute the analysis on a single package. The driver passes it an +instance of the Pass type. + +# Pass + +A [Pass] describes a single unit of work: the application of a particular +Analyzer to a particular package of Go code. +The Pass provides information to the Analyzer's Run function about the +package being analyzed, and provides operations to the Run function for +reporting diagnostics and other information back to the driver. 
+ + type Pass struct { + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + IgnoredFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) + ... + } + +The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, +type information, and source positions for a single package of Go code. + +The OtherFiles field provides the names of non-Go +files such as assembly that are part of this package. +Similarly, the IgnoredFiles field provides the names of Go and non-Go +source files that are not part of this package with the current build +configuration but may be part of other build configurations. +The contents of these files may be read using Pass.ReadFile; +see the "asmdecl" or "buildtags" analyzers for examples of loading +non-Go files and reporting diagnostics against them. + +The ResultOf field provides the results computed by the analyzers +required by this one, as expressed in its Analyzer.Requires field. The +driver runs the required analyzers first and makes their results +available in this map. Each Analyzer must return a value of the type +described in its Analyzer.ResultType field. +For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which +provides a control-flow graph for each function in the package (see +golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that +enables other Analyzers to traverse the syntax trees of the package more +efficiently; and the "buildssa" analyzer constructs an SSA-form +intermediate representation. +Each of these Analyzers extends the capabilities of later Analyzers +without adding a dependency to the core API, so an analysis tool pays +only for the extensions it needs. + +The Report function emits a diagnostic, a message associated with a +source position. For most analyses, diagnostics are their primary +result. +For convenience, Pass provides a helper method, Reportf, to report a new +diagnostic by formatting a string. +Diagnostic is defined as: + + type Diagnostic struct { + Pos token.Pos + Category string // optional + Message string + } + +The optional Category field is a short identifier that classifies the +kind of message when an analysis produces several kinds of diagnostic. + +The [Diagnostic] struct does not have a field to indicate its severity +because opinions about the relative importance of Analyzers and their +diagnostics vary widely among users. The design of this framework does +not hold each Analyzer responsible for identifying the severity of its +diagnostics. Instead, we expect that drivers will allow the user to +customize the filtering and prioritization of diagnostics based on the +producing Analyzer and optional Category, according to the user's +preferences. + +Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl +and buildtag, inspect the raw text of Go source files or even non-Go +files such as assembly. To report a diagnostic against a line of a +raw text file, use the following sequence: + + content, err := pass.ReadFile(filename) + if err != nil { ... } + tf := fset.AddFile(filename, -1, len(content)) + tf.SetLinesForContent(content) + ... + pass.Reportf(tf.LineStart(line), "oops") + +# Modular analysis with Facts + +To improve efficiency and scalability, large programs are routinely +built using separate compilation: units of the program are compiled +separately, and recompiled only when one of their dependencies changes; +independent modules may be compiled in parallel. 
The same technique may +be applied to static analyses, for the same benefits. Such analyses are +described as "modular". + +A compiler’s type checker is an example of a modular static analysis. +Many other checkers we would like to apply to Go programs can be +understood as alternative or non-standard type systems. For example, +vet's printf checker infers whether a function has the "printf wrapper" +type, and it applies stricter checks to calls of such functions. In +addition, it records which functions are printf wrappers for use by +later analysis passes to identify other printf wrappers by induction. +A result such as “f is a printf wrapper” that is not interesting by +itself but serves as a stepping stone to an interesting result (such as +a diagnostic) is called a [Fact]. + +The analysis API allows an analysis to define new types of facts, to +associate facts of these types with objects (named entities) declared +within the current package, or with the package as a whole, and to query +for an existing fact of a given type associated with an object or +package. + +An Analyzer that uses facts must declare their types: + + var Analyzer = &analysis.Analyzer{ + Name: "printf", + FactTypes: []analysis.Fact{new(isWrapper)}, + ... + } + + type isWrapper struct{} // => *types.Func f “is a printf wrapper” + +The driver program ensures that facts for a pass’s dependencies are +generated before analyzing the package and is responsible for propagating +facts from one package to another, possibly across address spaces. +Consequently, Facts must be serializable. The API requires that drivers +use the gob encoding, an efficient, robust, self-describing binary +protocol. A fact type may implement the GobEncoder/GobDecoder interfaces +if the default encoding is unsuitable. Facts should be stateless. +Because serialized facts may appear within build outputs, the gob encoding +of a fact must be deterministic, to avoid spurious cache misses in +build systems that use content-addressable caches. +The driver makes a single call to the gob encoder for all facts +exported by a given analysis pass, so that the topology of +shared data structures referenced by multiple facts is preserved. + +The Pass type has functions to import and export facts, +associated either with an object or with a package: + + type Pass struct { + ... + ExportObjectFact func(types.Object, Fact) + ImportObjectFact func(types.Object, Fact) bool + + ExportPackageFact func(fact Fact) + ImportPackageFact func(*types.Package, Fact) bool + } + +An Analyzer may only export facts associated with the current package or +its objects, though it may import facts from any package or object that +is an import dependency of the current package. + +Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by +the pair (obj, TypeOf(fact)), and the ImportObjectFact function +retrieves the entry from this map and copies its value into the variable +pointed to by fact. This scheme assumes that the concrete type of fact +is a pointer; this assumption is checked by the Validate function. +See the "printf" analyzer for an example of object facts in action. + +Some driver implementations (such as those based on Bazel and Blaze) do +not currently apply analyzers to packages of the standard library. +Therefore, for best results, analyzer authors should not rely on +analysis facts being available for standard packages. 
+For example, although the printf checker is capable of deducing during +analysis of the log package that log.Printf is a printf wrapper, +this fact is built in to the analyzer so that it correctly checks +calls to log.Printf even when run in a driver that does not apply +it to standard packages. We would like to remove this limitation in future. + +# Testing an Analyzer + +The analysistest subpackage provides utilities for testing an Analyzer. +In a few lines of code, it is possible to run an analyzer on a package +of testdata files and check that it reported all the expected +diagnostics and facts (and no more). Expectations are expressed using +"// want ..." comments in the input code. + +# Standalone commands + +Analyzers are provided in the form of packages that a driver program is +expected to import. The vet command imports a set of several analyzers, +but users may wish to define their own analysis commands that perform +additional checks. To simplify the task of creating an analysis command, +either for a single analyzer or for a whole suite, we provide the +singlechecker and multichecker subpackages. + +The singlechecker package provides the main function for a command that +runs one analyzer. By convention, each analyzer such as +go/analysis/passes/findcall should be accompanied by a singlechecker-based +command such as go/analysis/passes/findcall/cmd/findcall, defined in its +entirety as: + + package main + + import ( + "golang.org/x/tools/go/analysis/passes/findcall" + "golang.org/x/tools/go/analysis/singlechecker" + ) + + func main() { singlechecker.Main(findcall.Analyzer) } + +A tool that provides multiple analyzers can use multichecker in a +similar way, giving it the list of Analyzers. +*/ +package analysis diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go new file mode 100644 index 00000000000..ff14ff58f9c --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -0,0 +1,447 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisflags defines helpers for processing flags of +// analysis driver tools. +package analysisflags + +import ( + "crypto/sha256" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "go/token" + "io" + "log" + "os" + "strconv" + "strings" + + "golang.org/x/tools/go/analysis" +) + +// flags common to all {single,multi,unit}checkers. +var ( + JSON = false // -json + Context = -1 // -c=N: if N>0, display offending line plus N lines of context +) + +// Parse creates a flag for each of the analyzer's flags, +// including (in multi mode) a flag named after the analyzer, +// parses the flags, then filters and returns the list of +// analyzers enabled by flags. +// +// The result is intended to be passed to unitchecker.Run or checker.Run. +// Use in unitchecker.Run will gob.Register all fact types for the returned +// graph of analyzers but of course not the ones only reachable from +// dropped analyzers. To avoid inconsistency about which gob types are +// registered from run to run, Parse itself gob.Registers all the facts +// only reachable from dropped analyzers. +// This is not a particularly elegant API, but this is an internal package. +func Parse(analyzers []*analysis.Analyzer, multi bool) []*analysis.Analyzer { + // Connect each analysis flag to the command line as -analysis.flag. 
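+	// For example, in multi mode an analyzer named "printf" with a flag
+	// "funcs" is exposed as -printf (a triState enable flag) and
+	// -printf.funcs; in single mode the same flag appears simply as -funcs.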
+ enabled := make(map[*analysis.Analyzer]*triState) + for _, a := range analyzers { + var prefix string + + // Add -NAME flag to enable it. + if multi { + prefix = a.Name + "." + + enable := new(triState) + enableUsage := "enable " + a.Name + " analysis" + flag.Var(enable, a.Name, enableUsage) + enabled[a] = enable + } + + a.Flags.VisitAll(func(f *flag.Flag) { + if !multi && flag.Lookup(f.Name) != nil { + log.Printf("%s flag -%s would conflict with driver; skipping", a.Name, f.Name) + return + } + + name := prefix + f.Name + flag.Var(f.Value, name, f.Usage) + }) + } + + // standard flags: -flags, -V. + printflags := flag.Bool("flags", false, "print analyzer flags in JSON") + addVersionFlag() + + // flags common to all checkers + flag.BoolVar(&JSON, "json", JSON, "emit JSON output") + flag.IntVar(&Context, "c", Context, `display offending line with this many lines of context`) + + // Add shims for legacy vet flags to enable existing + // scripts that run vet to continue to work. + _ = flag.Bool("source", false, "no effect (deprecated)") + _ = flag.Bool("v", false, "no effect (deprecated)") + _ = flag.Bool("all", false, "no effect (deprecated)") + _ = flag.String("tags", "", "no effect (deprecated)") + for old, new := range vetLegacyFlags { + newFlag := flag.Lookup(new) + if newFlag != nil && flag.Lookup(old) == nil { + flag.Var(newFlag.Value, old, "deprecated alias for -"+new) + } + } + + flag.Parse() // (ExitOnError) + + // -flags: print flags so that go vet knows which ones are legitimate. + if *printflags { + printFlags() + os.Exit(0) + } + + everything := expand(analyzers) + + // If any -NAME flag is true, run only those analyzers. Otherwise, + // if any -NAME flag is false, run all but those analyzers. + if multi { + var hasTrue, hasFalse bool + for _, ts := range enabled { + switch *ts { + case setTrue: + hasTrue = true + case setFalse: + hasFalse = true + } + } + + var keep []*analysis.Analyzer + if hasTrue { + for _, a := range analyzers { + if *enabled[a] == setTrue { + keep = append(keep, a) + } + } + analyzers = keep + } else if hasFalse { + for _, a := range analyzers { + if *enabled[a] != setFalse { + keep = append(keep, a) + } + } + analyzers = keep + } + } + + // Register fact types of skipped analyzers + // in case we encounter them in imported files. + kept := expand(analyzers) + for a := range everything { + if !kept[a] { + for _, f := range a.FactTypes { + gob.Register(f) + } + } + } + + return analyzers +} + +func expand(analyzers []*analysis.Analyzer) map[*analysis.Analyzer]bool { + seen := make(map[*analysis.Analyzer]bool) + var visitAll func([]*analysis.Analyzer) + visitAll = func(analyzers []*analysis.Analyzer) { + for _, a := range analyzers { + if !seen[a] { + seen[a] = true + visitAll(a.Requires) + } + } + } + visitAll(analyzers) + return seen +} + +func printFlags() { + type jsonFlag struct { + Name string + Bool bool + Usage string + } + var flags []jsonFlag = nil + flag.VisitAll(func(f *flag.Flag) { + // Don't report {single,multi}checker debugging + // flags or fix as these have no effect on unitchecker + // (as invoked by 'go vet'). 
+ switch f.Name { + case "debug", "cpuprofile", "memprofile", "trace", "fix": + return + } + + b, ok := f.Value.(interface{ IsBoolFlag() bool }) + isBool := ok && b.IsBoolFlag() + flags = append(flags, jsonFlag{f.Name, isBool, f.Usage}) + }) + data, err := json.MarshalIndent(flags, "", "\t") + if err != nil { + log.Fatal(err) + } + os.Stdout.Write(data) +} + +// addVersionFlag registers a -V flag that, if set, +// prints the executable version and exits 0. +// +// If the -V flag already exists — for example, because it was already +// registered by a call to cmd/internal/objabi.AddVersionFlag — then +// addVersionFlag does nothing. +func addVersionFlag() { + if flag.Lookup("V") == nil { + flag.Var(versionFlag{}, "V", "print version and exit") + } +} + +// versionFlag minimally complies with the -V protocol required by "go vet". +type versionFlag struct{} + +func (versionFlag) IsBoolFlag() bool { return true } +func (versionFlag) Get() interface{} { return nil } +func (versionFlag) String() string { return "" } +func (versionFlag) Set(s string) error { + if s != "full" { + log.Fatalf("unsupported flag value: -V=%s (use -V=full)", s) + } + + // This replicates the minimal subset of + // cmd/internal/objabi.AddVersionFlag, which is private to the + // go tool yet forms part of our command-line interface. + // TODO(adonovan): clarify the contract. + + // Print the tool version so the build system can track changes. + // Formats: + // $progname version devel ... buildID=... + // $progname version go1.9.1 + progname, err := os.Executable() + if err != nil { + return err + } + f, err := os.Open(progname) + if err != nil { + log.Fatal(err) + } + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + log.Fatal(err) + } + f.Close() + fmt.Printf("%s version devel comments-go-here buildID=%02x\n", + progname, string(h.Sum(nil))) + os.Exit(0) + return nil +} + +// A triState is a boolean that knows whether +// it has been set to either true or false. +// It is used to identify whether a flag appears; +// the standard boolean flag cannot +// distinguish missing from unset. +// It also satisfies flag.Value. +type triState int + +const ( + unset triState = iota + setTrue + setFalse +) + +func triStateFlag(name string, value triState, usage string) *triState { + flag.Var(&value, name, usage) + return &value +} + +// triState implements flag.Value, flag.Getter, and flag.boolFlag. +// They work like boolean flags: we can say vet -printf as well as vet -printf=true +func (ts *triState) Get() interface{} { + return *ts == setTrue +} + +func (ts triState) isTrue() bool { + return ts == setTrue +} + +func (ts *triState) Set(value string) error { + b, err := strconv.ParseBool(value) + if err != nil { + // This error message looks poor but package "flag" adds + // "invalid boolean value %q for -NAME: %s" + return fmt.Errorf("want true or false") + } + if b { + *ts = setTrue + } else { + *ts = setFalse + } + return nil +} + +func (ts *triState) String() string { + switch *ts { + case unset: + return "true" + case setTrue: + return "true" + case setFalse: + return "false" + } + panic("not reached") +} + +func (ts triState) IsBoolFlag() bool { + return true +} + +// Legacy flag support + +// vetLegacyFlags maps flags used by legacy vet to their corresponding +// new names. The old names will continue to work. 
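+// For example, with these shims 'go vet -methods' behaves as
+// 'go vet -stdmethods': Parse registers each old name as a
+// deprecated alias for the corresponding new flag.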
+var vetLegacyFlags = map[string]string{
+	// Analyzer name changes
+	"bool":       "bools",
+	"buildtags":  "buildtag",
+	"methods":    "stdmethods",
+	"rangeloops": "loopclosure",
+
+	// Analyzer flags
+	"compositewhitelist":  "composites.whitelist",
+	"printfuncs":          "printf.funcs",
+	"shadowstrict":        "shadow.strict",
+	"unusedfuncs":         "unusedresult.funcs",
+	"unusedstringmethods": "unusedresult.stringmethods",
+}
+
+// ---- output helpers common to all drivers ----
+
+// PrintPlain prints a diagnostic in plain text form,
+// with context specified by the -c flag.
+func PrintPlain(fset *token.FileSet, diag analysis.Diagnostic) {
+	posn := fset.Position(diag.Pos)
+	fmt.Fprintf(os.Stderr, "%s: %s\n", posn, diag.Message)
+
+	// -c=N: show offending line plus N lines of context.
+	if Context >= 0 {
+		posn := fset.Position(diag.Pos)
+		end := fset.Position(diag.End)
+		if !end.IsValid() {
+			end = posn
+		}
+		data, _ := os.ReadFile(posn.Filename)
+		lines := strings.Split(string(data), "\n")
+		for i := posn.Line - Context; i <= end.Line+Context; i++ {
+			if 1 <= i && i <= len(lines) {
+				fmt.Fprintf(os.Stderr, "%d\t%s\n", i, lines[i-1])
+			}
+		}
+	}
+}
+
+// A JSONTree is a mapping from package ID to analysis name to result.
+// Each result is either a jsonError or a list of JSONDiagnostic.
+type JSONTree map[string]map[string]interface{}
+
+// A JSONTextEdit describes the replacement of a portion of a file.
+// Start and End are zero-based half-open indices into the original byte
+// sequence of the file, and New is the new text.
+type JSONTextEdit struct {
+	Filename string `json:"filename"`
+	Start    int    `json:"start"`
+	End      int    `json:"end"`
+	New      string `json:"new"`
+}
+
+// A JSONSuggestedFix describes an edit that should be applied as a whole or not
+// at all. It might contain multiple TextEdits/text_edits if the SuggestedFix
+// consists of multiple non-contiguous edits.
+type JSONSuggestedFix struct {
+	Message string         `json:"message"`
+	Edits   []JSONTextEdit `json:"edits"`
+}
+
+// A JSONDiagnostic describes the JSON schema of an analysis.Diagnostic.
+//
+// TODO(matloob): include End position if present.
+type JSONDiagnostic struct {
+	Category       string                   `json:"category,omitempty"`
+	Posn           string                   `json:"posn"` // e.g. "file.go:line:column"
+	Message        string                   `json:"message"`
+	SuggestedFixes []JSONSuggestedFix       `json:"suggested_fixes,omitempty"`
+	Related        []JSONRelatedInformation `json:"related,omitempty"`
+}
+
+// A JSONRelatedInformation describes a secondary position and message
+// related to a primary diagnostic.
+//
+// TODO(adonovan): include End position if present.
+type JSONRelatedInformation struct {
+	Posn    string `json:"posn"` // e.g. "file.go:line:column"
+	Message string `json:"message"`
+}
+
+// Add adds the result of analysis 'name' on package 'id'.
+// The result is either a list of diagnostics or an error.
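+// Schematically, a tree marshals as:
+//
+//	{
+//		"example.com/p": {
+//			"printf": [{"posn": "p.go:3:5", "message": "..."}],
+//			"nilness": {"error": "..."}
+//		}
+//	}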
+func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis.Diagnostic, err error) { + var v interface{} + if err != nil { + type jsonError struct { + Err string `json:"error"` + } + v = jsonError{err.Error()} + } else if len(diags) > 0 { + diagnostics := make([]JSONDiagnostic, 0, len(diags)) + for _, f := range diags { + var fixes []JSONSuggestedFix + for _, fix := range f.SuggestedFixes { + var edits []JSONTextEdit + for _, edit := range fix.TextEdits { + edits = append(edits, JSONTextEdit{ + Filename: fset.Position(edit.Pos).Filename, + Start: fset.Position(edit.Pos).Offset, + End: fset.Position(edit.End).Offset, + New: string(edit.NewText), + }) + } + fixes = append(fixes, JSONSuggestedFix{ + Message: fix.Message, + Edits: edits, + }) + } + var related []JSONRelatedInformation + for _, r := range f.Related { + related = append(related, JSONRelatedInformation{ + Posn: fset.Position(r.Pos).String(), + Message: r.Message, + }) + } + jdiag := JSONDiagnostic{ + Category: f.Category, + Posn: fset.Position(f.Pos).String(), + Message: f.Message, + SuggestedFixes: fixes, + Related: related, + } + diagnostics = append(diagnostics, jdiag) + } + v = diagnostics + } + if v != nil { + m, ok := tree[id] + if !ok { + m = make(map[string]interface{}) + tree[id] = m + } + m[name] = v + } +} + +func (tree JSONTree) Print() { + data, err := json.MarshalIndent(tree, "", "\t") + if err != nil { + log.Panicf("internal error: JSON marshaling failed: %v", err) + } + fmt.Printf("%s\n", data) +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go new file mode 100644 index 00000000000..ce92892c817 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/help.go @@ -0,0 +1,96 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisflags + +import ( + "flag" + "fmt" + "log" + "os" + "sort" + "strings" + + "golang.org/x/tools/go/analysis" +) + +const help = `PROGNAME is a tool for static analysis of Go programs. + +PROGNAME examines Go source code and reports suspicious constructs, +such as Printf calls whose arguments do not align with the format +string. It uses heuristics that do not guarantee all reports are +genuine problems, but it can find errors not caught by the compilers. +` + +// Help implements the help subcommand for a multichecker or unitchecker +// style command. The optional args specify the analyzers to describe. +// Help calls log.Fatal if no such analyzer exists. +func Help(progname string, analyzers []*analysis.Analyzer, args []string) { + // No args: show summary of all analyzers. + if len(args) == 0 { + fmt.Println(strings.Replace(help, "PROGNAME", progname, -1)) + fmt.Println("Registered analyzers:") + fmt.Println() + sort.Slice(analyzers, func(i, j int) bool { + return analyzers[i].Name < analyzers[j].Name + }) + for _, a := range analyzers { + title := strings.Split(a.Doc, "\n\n")[0] + fmt.Printf(" %-12s %s\n", a.Name, title) + } + fmt.Println("\nBy default all analyzers are run.") + fmt.Println("To select specific analyzers, use the -NAME flag for each one,") + fmt.Println(" or -NAME=false to run all analyzers not explicitly disabled.") + + // Show only the core command-line flags. 
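+		// (Core flags are those whose names contain no '.';
+		// per-analyzer flags are registered as "analyzer.flag" and
+		// are shown by the per-analyzer help below.)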
+ fmt.Println("\nCore flags:") + fmt.Println() + fs := flag.NewFlagSet("", flag.ExitOnError) + flag.VisitAll(func(f *flag.Flag) { + if !strings.Contains(f.Name, ".") { + fs.Var(f.Value, f.Name, f.Usage) + } + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + fmt.Printf("\nTo see details and flags of a specific analyzer, run '%s help name'.\n", progname) + + return + } + + // Show help on specific analyzer(s). +outer: + for _, arg := range args { + for _, a := range analyzers { + if a.Name == arg { + paras := strings.Split(a.Doc, "\n\n") + title := paras[0] + fmt.Printf("%s: %s\n", a.Name, title) + + // Show only the flags relating to this analysis, + // properly prefixed. + first := true + fs := flag.NewFlagSet(a.Name, flag.ExitOnError) + a.Flags.VisitAll(func(f *flag.Flag) { + if first { + first = false + fmt.Println("\nAnalyzer flags:") + fmt.Println() + } + fs.Var(f.Value, a.Name+"."+f.Name, f.Usage) + }) + fs.SetOutput(os.Stdout) + fs.PrintDefaults() + + if len(paras) > 1 { + fmt.Printf("\n%s\n", strings.Join(paras[1:], "\n\n")) + } + + continue outer + } + } + log.Fatalf("Analyzer %q not registered", arg) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go new file mode 100644 index 00000000000..26a917a9919 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/url.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisflags + +import ( + "fmt" + "net/url" + + "golang.org/x/tools/go/analysis" +) + +// ResolveURL resolves the URL field for a Diagnostic from an Analyzer +// and returns the URL. See Diagnostic.URL for details. +func ResolveURL(a *analysis.Analyzer, d analysis.Diagnostic) (string, error) { + if d.URL == "" && d.Category == "" && a.URL == "" { + return "", nil // do nothing + } + raw := d.URL + if d.URL == "" && d.Category != "" { + raw = "#" + d.Category + } + u, err := url.Parse(raw) + if err != nil { + return "", fmt.Errorf("invalid Diagnostic.URL %q: %s", raw, err) + } + base, err := url.Parse(a.URL) + if err != nil { + return "", fmt.Errorf("invalid Analyzer.URL %q: %s", a.URL, err) + } + return base.ResolveReference(u).String(), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go new file mode 100644 index 00000000000..8a802831c39 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -0,0 +1,984 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package checker defines the implementation of the checker commands. +// The same code drives the multi-analysis driver, the single-analysis +// driver that is conventionally provided for convenience along with +// each analysis package, and the test driver. 
+package checker + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "go/format" + "go/token" + "go/types" + "log" + "os" + "reflect" + "runtime" + "runtime/pprof" + "runtime/trace" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/diff" + "golang.org/x/tools/internal/robustio" +) + +var ( + // Debug is a set of single-letter flags: + // + // f show [f]acts as they are created + // p disable [p]arallel execution of analyzers + // s do additional [s]anity checks on fact types and serialization + // t show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise) + // v show [v]erbose logging + // + Debug = "" + + // Log files for optional performance tracing. + CPUProfile, MemProfile, Trace string + + // IncludeTests indicates whether test files should be analyzed too. + IncludeTests = true + + // Fix determines whether to apply all suggested fixes. + Fix bool +) + +// RegisterFlags registers command-line flags used by the analysis driver. +func RegisterFlags() { + // When adding flags here, remember to update + // the list of suppressed flags in analysisflags. + + flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`) + + flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file") + flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file") + flag.StringVar(&Trace, "trace", "", "write trace log to this file") + flag.BoolVar(&IncludeTests, "test", IncludeTests, "indicates whether test files should be analyzed, too") + + flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes") +} + +// Run loads the packages specified by args using go/packages, +// then applies the specified analyzers to them. +// Analysis flags must already have been set. +// Analyzers must be valid according to [analysis.Validate]. +// It provides most of the logic for the main functions of both the +// singlechecker and the multi-analysis commands. +// It returns the appropriate exit code. +func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) { + if CPUProfile != "" { + f, err := os.Create(CPUProfile) + if err != nil { + log.Fatal(err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal(err) + } + // NB: profile won't be written in case of error. + defer pprof.StopCPUProfile() + } + + if Trace != "" { + f, err := os.Create(Trace) + if err != nil { + log.Fatal(err) + } + if err := trace.Start(f); err != nil { + log.Fatal(err) + } + // NB: trace log won't be written in case of error. + defer func() { + trace.Stop() + log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace) + }() + } + + if MemProfile != "" { + f, err := os.Create(MemProfile) + if err != nil { + log.Fatal(err) + } + // NB: memprofile won't be written in case of error. + defer func() { + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatalf("Writing memory profile: %v", err) + } + f.Close() + }() + } + + // Load the packages. + if dbg('v') { + log.SetPrefix("") + log.SetFlags(log.Lmicroseconds) // display timing + log.Printf("load %s", args) + } + + // Optimization: if the selected analyzers don't produce/consume + // facts, we need source only for the initial packages. 
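+	// (needFacts walks the transitive Requires graph looking for declared
+	// FactTypes; when facts are in play, load below uses LoadAllSyntax so
+	// that dependencies are parsed and type-checked from source too.)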
+ allSyntax := needFacts(analyzers) + initial, err := load(args, allSyntax) + if err != nil { + log.Print(err) + return 1 + } + + pkgsExitCode := 0 + // Print package and module errors regardless of RunDespiteErrors. + // Do not exit if there are errors, yet. + if n := packages.PrintErrors(initial); n > 0 { + pkgsExitCode = 1 + } + + // Run the analyzers. On each package with (transitive) + // errors, we run only the subset of analyzers that are + // marked (and whose transitive requirements are also + // marked) with RunDespiteErrors. + roots := analyze(initial, analyzers) + + // Apply fixes. + if Fix { + if err := applyFixes(roots); err != nil { + // Fail when applying fixes failed. + log.Print(err) + return 1 + } + } + + // Print the results. If !RunDespiteErrors and there + // are errors in the packages, this will have 0 exit + // code. Otherwise, we prefer to return exit code + // indicating diagnostics. + if diagExitCode := printDiagnostics(roots); diagExitCode != 0 { + return diagExitCode // there were diagnostics + } + return pkgsExitCode // package errors but no diagnostics +} + +// load loads the initial packages. Returns only top-level loading +// errors. Does not consider errors in packages. +func load(patterns []string, allSyntax bool) ([]*packages.Package, error) { + mode := packages.LoadSyntax + if allSyntax { + mode = packages.LoadAllSyntax + } + mode |= packages.NeedModule + conf := packages.Config{ + Mode: mode, + Tests: IncludeTests, + } + initial, err := packages.Load(&conf, patterns...) + if err == nil && len(initial) == 0 { + err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " ")) + } + return initial, err +} + +// TestAnalyzer applies an analyzer to a set of packages (and their +// dependencies if necessary) and returns the results. +// The analyzer must be valid according to [analysis.Validate]. +// +// Facts about pkg are returned in a map keyed by object; package facts +// have a nil key. +// +// This entry point is used only by analysistest. +func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult { + var results []*TestAnalyzerResult + for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) { + facts := make(map[types.Object][]analysis.Fact) + for key, fact := range act.objectFacts { + if key.obj.Pkg() == act.pass.Pkg { + facts[key.obj] = append(facts[key.obj], fact) + } + } + for key, fact := range act.packageFacts { + if key.pkg == act.pass.Pkg { + facts[nil] = append(facts[nil], fact) + } + } + + results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err}) + } + return results +} + +type TestAnalyzerResult struct { + Pass *analysis.Pass + Diagnostics []analysis.Diagnostic + Facts map[types.Object][]analysis.Fact + Result interface{} + Err error +} + +func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action { + // Construct the action graph. + if dbg('v') { + log.Printf("building graph of analysis passes") + } + + // Each graph node (action) is one unit of analysis. + // Edges express package-to-package (vertical) dependencies, + // and analysis-to-analysis (horizontal) dependencies. 
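+	// For example, applying analyzer A (which requires R and declares
+	// fact types) to package p (which imports q) yields actions (A,p),
+	// (R,p), (A,q), and (R,q), with a horizontal edge from (A,p) to
+	// (R,p) and a vertical edge from (A,p) to (A,q).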
+	type key struct {
+		*analysis.Analyzer
+		*packages.Package
+	}
+	actions := make(map[key]*action)
+
+	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
+	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
+		k := key{a, pkg}
+		act, ok := actions[k]
+		if !ok {
+			act = &action{a: a, pkg: pkg}
+
+			// Add a dependency on each required analyzer.
+			for _, req := range a.Requires {
+				act.deps = append(act.deps, mkAction(req, pkg))
+			}
+
+			// An analysis that consumes/produces facts
+			// must run on the package's dependencies too.
+			if len(a.FactTypes) > 0 {
+				paths := make([]string, 0, len(pkg.Imports))
+				for path := range pkg.Imports {
+					paths = append(paths, path)
+				}
+				sort.Strings(paths) // for determinism
+				for _, path := range paths {
+					dep := mkAction(a, pkg.Imports[path])
+					act.deps = append(act.deps, dep)
+				}
+			}
+
+			actions[k] = act
+		}
+		return act
+	}
+
+	// Build nodes for initial packages.
+	var roots []*action
+	for _, a := range analyzers {
+		for _, pkg := range pkgs {
+			root := mkAction(a, pkg)
+			root.isroot = true
+			roots = append(roots, root)
+		}
+	}
+
+	// Execute the graph in parallel.
+	execAll(roots)
+
+	return roots
+}
+
+func applyFixes(roots []*action) error {
+	// Visit all of the actions and accumulate the suggested edits.
+	paths := make(map[robustio.FileID]string)
+	editsByAction := make(map[robustio.FileID]map[*action][]diff.Edit)
+	visited := make(map[*action]bool)
+	var apply func(*action) error
+	var visitAll func(actions []*action) error
+	visitAll = func(actions []*action) error {
+		for _, act := range actions {
+			if !visited[act] {
+				visited[act] = true
+				if err := visitAll(act.deps); err != nil {
+					return err
+				}
+				if err := apply(act); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	apply = func(act *action) error {
+		editsForTokenFile := make(map[*token.File][]diff.Edit)
+		for _, diag := range act.diagnostics {
+			for _, sf := range diag.SuggestedFixes {
+				for _, edit := range sf.TextEdits {
+					// Validate the edit.
+					// Any error here indicates a bug in the analyzer.
+					start, end := edit.Pos, edit.End
+					file := act.pkg.Fset.File(start)
+					if file == nil {
+						return fmt.Errorf("analysis %q suggests invalid fix: missing file info for pos (%v)",
+							act.a.Name, start)
+					}
+					if !end.IsValid() {
+						end = start
+					}
+					if start > end {
+						return fmt.Errorf("analysis %q suggests invalid fix: pos (%v) > end (%v)",
+							act.a.Name, start, end)
+					}
+					if eof := token.Pos(file.Base() + file.Size()); end > eof {
+						return fmt.Errorf("analysis %q suggests invalid fix: end (%v) past end of file (%v)",
+							act.a.Name, end, eof)
+					}
+					edit := diff.Edit{
+						Start: file.Offset(start),
+						End:   file.Offset(end),
+						New:   string(edit.NewText),
+					}
+					editsForTokenFile[file] = append(editsForTokenFile[file], edit)
+				}
+			}
+		}
+
+		for f, edits := range editsForTokenFile {
+			id, _, err := robustio.GetFileID(f.Name())
+			if err != nil {
+				return err
+			}
+			if _, hasId := paths[id]; !hasId {
+				paths[id] = f.Name()
+				editsByAction[id] = make(map[*action][]diff.Edit)
+			}
+			editsByAction[id][act] = edits
+		}
+		return nil
+	}
+
+	if err := visitAll(roots); err != nil {
+		return err
+	}
+
+	// Validate and group the edits to each actual file.
+	editsByPath := make(map[string][]diff.Edit)
+	for id, actToEdits := range editsByAction {
+		path := paths[id]
+		actions := make([]*action, 0, len(actToEdits))
+		for act := range actToEdits {
+			actions = append(actions, act)
+		}
+
+		// Does any action create conflicting edits?
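+		// (For a single action, validateEdits sorts its edits and
+		// reports the index of the first overlapping adjacent pair,
+		// if any.)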
+ for _, act := range actions { + edits := actToEdits[act] + if _, invalid := validateEdits(edits); invalid > 0 { + name, x, y := act.a.Name, edits[invalid-1], edits[invalid] + return diff3Conflict(path, name, name, []diff.Edit{x}, []diff.Edit{y}) + } + } + + // Does any pair of different actions create edits that conflict? + for j := range actions { + for k := range actions[:j] { + x, y := actions[j], actions[k] + if x.a.Name > y.a.Name { + x, y = y, x + } + xedits, yedits := actToEdits[x], actToEdits[y] + combined := append(xedits, yedits...) + if _, invalid := validateEdits(combined); invalid > 0 { + // TODO: consider applying each action's consistent list of edits entirely, + // and then using a three-way merge (such as GNU diff3) on the resulting + // files to report more precisely the parts that actually conflict. + return diff3Conflict(path, x.a.Name, y.a.Name, xedits, yedits) + } + } + } + + var edits []diff.Edit + for act := range actToEdits { + edits = append(edits, actToEdits[act]...) + } + editsByPath[path], _ = validateEdits(edits) // remove duplicates. already validated. + } + + // Now we've got a set of valid edits for each file. Apply them. + for path, edits := range editsByPath { + // TODO(adonovan): this should really work on the same + // gulp from the file system that fed the analyzer (see #62292). + contents, err := os.ReadFile(path) + if err != nil { + return err + } + + out, err := diff.ApplyBytes(contents, edits) + if err != nil { + return err + } + + // Try to format the file. + if formatted, err := format.Source(out); err == nil { + out = formatted + } + + if err := os.WriteFile(path, out, 0644); err != nil { + return err + } + } + return nil +} + +// validateEdits returns a list of edits that is sorted and +// contains no duplicate edits. Returns the index of some +// overlapping adjacent edits if there is one and <0 if the +// edits are valid. +func validateEdits(edits []diff.Edit) ([]diff.Edit, int) { + if len(edits) == 0 { + return nil, -1 + } + equivalent := func(x, y diff.Edit) bool { + return x.Start == y.Start && x.End == y.End && x.New == y.New + } + diff.SortEdits(edits) + unique := []diff.Edit{edits[0]} + invalid := -1 + for i := 1; i < len(edits); i++ { + prev, cur := edits[i-1], edits[i] + // We skip over equivalent edits without considering them + // an error. This handles identical edits coming from the + // multiple ways of loading a package into a + // *go/packages.Packages for testing, e.g. packages "p" and "p [p.test]". + if !equivalent(prev, cur) { + unique = append(unique, cur) + if prev.End > cur.Start { + invalid = i + } + } + } + return unique, invalid +} + +// diff3Conflict returns an error describing two conflicting sets of +// edits on a file at path. +func diff3Conflict(path string, xlabel, ylabel string, xedits, yedits []diff.Edit) error { + contents, err := os.ReadFile(path) + if err != nil { + return err + } + oldlabel, old := "base", string(contents) + + xdiff, err := diff.ToUnified(oldlabel, xlabel, old, xedits, diff.DefaultContextLines) + if err != nil { + return err + } + ydiff, err := diff.ToUnified(oldlabel, ylabel, old, yedits, diff.DefaultContextLines) + if err != nil { + return err + } + + return fmt.Errorf("conflicting edits from %s and %s on %s\nfirst edits:\n%s\nsecond edits:\n%s", + xlabel, ylabel, path, xdiff, ydiff) +} + +// printDiagnostics prints the diagnostics for the root packages in either +// plain text or JSON format. JSON format also includes errors for any +// dependencies. 
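+//
+// In plain mode, diagnostics are printed only for root packages and are
+// de-duplicated by position and message, since source files shared by
+// several packages (such as "foo" and "foo.test") would otherwise be
+// reported more than once.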
+// +// It returns the exitcode: in plain mode, 0 for success, 1 for analysis +// errors, and 3 for diagnostics. We avoid 2 since the flag package uses +// it. JSON mode always succeeds at printing errors and diagnostics in a +// structured form to stdout. +func printDiagnostics(roots []*action) (exitcode int) { + // Print the output. + // + // Print diagnostics only for root packages, + // but errors for all packages. + printed := make(map[*action]bool) + var print func(*action) + var visitAll func(actions []*action) + visitAll = func(actions []*action) { + for _, act := range actions { + if !printed[act] { + printed[act] = true + visitAll(act.deps) + print(act) + } + } + } + + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + print = func(act *action) { + var diags []analysis.Diagnostic + if act.isroot { + diags = act.diagnostics + } + tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err) + } + visitAll(roots) + tree.Print() + } else { + // plain text output + + // De-duplicate diagnostics by position (not token.Pos) to + // avoid double-reporting in source files that belong to + // multiple packages, such as foo and foo.test. + type key struct { + pos token.Position + end token.Position + *analysis.Analyzer + message string + } + seen := make(map[key]bool) + + print = func(act *action) { + if act.err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err) + exitcode = 1 // analysis failed, at least partially + return + } + if act.isroot { + for _, diag := range act.diagnostics { + // We don't display a.Name/f.Category + // as most users don't care. + + posn := act.pkg.Fset.Position(diag.Pos) + end := act.pkg.Fset.Position(diag.End) + k := key{posn, end, act.a, diag.Message} + if seen[k] { + continue // duplicate + } + seen[k] = true + + analysisflags.PrintPlain(act.pkg.Fset, diag) + } + } + } + visitAll(roots) + + if exitcode == 0 && len(seen) > 0 { + exitcode = 3 // successfully produced diagnostics + } + } + + // Print timing info. + if dbg('t') { + if !dbg('p') { + log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism") + } + var all []*action + var total time.Duration + for act := range printed { + all = append(all, act) + total += act.duration + } + sort.Slice(all, func(i, j int) bool { + return all[i].duration > all[j].duration + }) + + // Print actions accounting for 90% of the total. + var sum time.Duration + for _, act := range all { + fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act) + sum += act.duration + if sum >= total*9/10 { + break + } + } + } + + return exitcode +} + +// needFacts reports whether any analysis required by the specified set +// needs facts. If so, we must load the entire program from source. +func needFacts(analyzers []*analysis.Analyzer) bool { + seen := make(map[*analysis.Analyzer]bool) + var q []*analysis.Analyzer // for BFS + q = append(q, analyzers...) + for len(q) > 0 { + a := q[0] + q = q[1:] + if !seen[a] { + seen[a] = true + if len(a.FactTypes) > 0 { + return true + } + q = append(q, a.Requires...) + } + } + return false +} + +// An action represents one unit of analysis work: the application of +// one analysis to one package. Actions form a DAG, both within a +// package (as different analyzers are applied, either in sequence or +// parallel), and across packages (as dependencies are analyzed). 
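+// Each action executes at most once (guarded by its sync.Once) and
+// memoizes its result, diagnostics, facts, and error for use by the
+// actions that depend on it.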
+type action struct { + once sync.Once + a *analysis.Analyzer + pkg *packages.Package + pass *analysis.Pass + isroot bool + deps []*action + objectFacts map[objectFactKey]analysis.Fact + packageFacts map[packageFactKey]analysis.Fact + result interface{} + diagnostics []analysis.Diagnostic + err error + duration time.Duration +} + +type objectFactKey struct { + obj types.Object + typ reflect.Type +} + +type packageFactKey struct { + pkg *types.Package + typ reflect.Type +} + +func (act *action) String() string { + return fmt.Sprintf("%s@%s", act.a, act.pkg) +} + +func execAll(actions []*action) { + sequential := dbg('p') + var wg sync.WaitGroup + for _, act := range actions { + wg.Add(1) + work := func(act *action) { + act.exec() + wg.Done() + } + if sequential { + work(act) + } else { + go work(act) + } + } + wg.Wait() +} + +func (act *action) exec() { act.once.Do(act.execOnce) } + +func (act *action) execOnce() { + // Analyze dependencies. + execAll(act.deps) + + // TODO(adonovan): uncomment this during profiling. + // It won't build pre-go1.11 but conditional compilation + // using build tags isn't warranted. + // + // ctx, task := trace.NewTask(context.Background(), "exec") + // trace.Log(ctx, "pass", act.String()) + // defer task.End() + + // Record time spent in this node but not its dependencies. + // In parallel mode, due to GC/scheduler contention, the + // time is 5x higher than in sequential mode, even with a + // semaphore limiting the number of threads here. + // So use -debug=tp. + if dbg('t') { + t0 := time.Now() + defer func() { act.duration = time.Since(t0) }() + } + + // Report an error if any dependency failed. + var failed []string + for _, dep := range act.deps { + if dep.err != nil { + failed = append(failed, dep.String()) + } + } + if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + // Plumb the output values of the dependencies + // into the inputs of this action. Also facts. + inputs := make(map[*analysis.Analyzer]interface{}) + act.objectFacts = make(map[objectFactKey]analysis.Fact) + act.packageFacts = make(map[packageFactKey]analysis.Fact) + for _, dep := range act.deps { + if dep.pkg == act.pkg { + // Same package, different analysis (horizontal edge): + // in-memory outputs of prerequisite analyzers + // become inputs to this analysis pass. + inputs[dep.a] = dep.result + + } else if dep.a == act.a { // (always true) + // Same analysis, different package (vertical edge): + // serialized facts produced by prerequisite analysis + // become available to this analysis pass. + inheritFacts(act, dep) + } + } + + module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers. + if mod := act.pkg.Module; mod != nil { + module.Path = mod.Path + module.Version = mod.Version + module.GoVersion = mod.GoVersion + } + + // Run the analysis. 
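+	// First build the Pass, plumbing the inputs and fact tables
+	// accumulated above into it; Report appends each diagnostic to
+	// this action's slice.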
+ pass := &analysis.Pass{ + Analyzer: act.a, + Fset: act.pkg.Fset, + Files: act.pkg.Syntax, + OtherFiles: act.pkg.OtherFiles, + IgnoredFiles: act.pkg.IgnoredFiles, + Pkg: act.pkg.Types, + TypesInfo: act.pkg.TypesInfo, + TypesSizes: act.pkg.TypesSizes, + TypeErrors: act.pkg.TypeErrors, + Module: module, + + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: act.importObjectFact, + ExportObjectFact: act.exportObjectFact, + ImportPackageFact: act.importPackageFact, + ExportPackageFact: act.exportPackageFact, + AllObjectFacts: act.allObjectFacts, + AllPackageFacts: act.allPackageFacts, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + act.pass = pass + + var err error + if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { + err = fmt.Errorf("analysis skipped due to errors in package") + } else { + act.result, err = pass.Analyzer.Run(pass) + if err == nil { + if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { + err = fmt.Errorf( + "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", + pass.Pkg.Path(), pass.Analyzer, got, want) + } + } + } + if err == nil { // resolve diagnostic URLs + for i := range act.diagnostics { + if url, uerr := analysisflags.ResolveURL(act.a, act.diagnostics[i]); uerr == nil { + act.diagnostics[i].URL = url + } else { + err = uerr // keep the last error + } + } + } + act.err = err + + // disallow calls after Run + pass.ExportObjectFact = nil + pass.ExportPackageFact = nil +} + +// inheritFacts populates act.facts with +// those it obtains from its dependency, dep. +func inheritFacts(act, dep *action) { + serialize := dbg('s') + + for key, fact := range dep.objectFacts { + // Filter out facts related to objects + // that are irrelevant downstream + // (equivalently: not in the compiler export data). + if !exportedFrom(key.obj, dep.pkg.Types) { + if false { + log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact) + } + continue + } + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces. + if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact) + } + act.objectFacts[key] = fact + } + + for key, fact := range dep.packageFacts { + // TODO: filter out facts that belong to + // packages not mentioned in the export data + // to prevent side channels. + + // Optionally serialize/deserialize fact + // to verify that it works across address spaces + // and is deterministic. + if serialize { + encodedFact, err := codeFact(fact) + if err != nil { + log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + } + fact = encodedFact + } + + if false { + log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact) + } + act.packageFacts[key] = fact + } +} + +// codeFact encodes then decodes a fact, +// just to exercise that logic. +func codeFact(fact analysis.Fact) (analysis.Fact, error) { + // We encode facts one at a time. + // A real modular driver would emit all facts + // into one encoder to improve gob efficiency. + var buf bytes.Buffer + if err := gob.NewEncoder(&buf).Encode(fact); err != nil { + return nil, err + } + + // Encode it twice and assert that we get the same bits. 
+ // This helps detect nondeterministic Gob encoding (e.g. of maps).
+ var buf2 bytes.Buffer
+ if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
+ return nil, err
+ }
+ if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
+ return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
+ }
+
+ new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
+ if err := gob.NewDecoder(&buf).Decode(new); err != nil {
+ return nil, err
+ }
+ return new, nil
+}
+
+// exportedFrom reports whether obj may be visible to a package that imports pkg.
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find their way into the API.
+// This is an overapproximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+ switch obj := obj.(type) {
+ case *types.Func:
+ return obj.Exported() && obj.Pkg() == pkg ||
+ obj.Type().(*types.Signature).Recv() != nil
+ case *types.Var:
+ if obj.IsField() {
+ return true
+ }
+ // we can't filter more aggressively than this because we need
+ // to consider function parameters exported, but have no way
+ // of telling apart function parameters from local variables.
+ return obj.Pkg() == pkg
+ case *types.TypeName, *types.Const:
+ return true
+ }
+ return false // Nil, Builtin, Label, or PkgName
+}
+
+// importObjectFact implements Pass.ImportObjectFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importObjectFact copies the fact value to *ptr.
+func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+ if obj == nil {
+ panic("nil object")
+ }
+ key := objectFactKey{obj, factType(ptr)}
+ if v, ok := act.objectFacts[key]; ok {
+ reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+ return true
+ }
+ return false
+}
+
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
+ if act.pass.ExportObjectFact == nil {
+ log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
+ }
+
+ if obj.Pkg() != act.pkg.Types {
+ log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
+ act.a, act.pkg, obj, fact)
+ }
+
+ key := objectFactKey{obj, factType(fact)}
+ act.objectFacts[key] = fact // clobber any existing entry
+ if dbg('f') {
+ objstr := types.ObjectString(obj, (*types.Package).Name)
+ fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
+ act.pkg.Fset.Position(obj.Pos()), objstr, fact)
+ }
+}
+
+// allObjectFacts implements Pass.AllObjectFacts.
+func (act *action) allObjectFacts() []analysis.ObjectFact {
+ facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
+ for k := range act.objectFacts {
+ facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
+ }
+ return facts
+}
+
+// importPackageFact implements Pass.ImportPackageFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importPackageFact copies the fact value to *ptr.
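+//
+// From an analyzer's point of view, the fact machinery in this file is
+// reached through the Pass; a hedged sketch using a hypothetical fact
+// type (which would also have to appear in the Analyzer's FactTypes):
+//
+//	type foundFact struct{ N int }
+//
+//	func (*foundFact) AFact() {}
+//
+//	// within Run:
+//	pass.ExportPackageFact(&foundFact{N: 3}) // a fact about pass.Pkg
+//	var f foundFact
+//	if pass.ImportPackageFact(dep, &f) { // dep: some imported *types.Package
+//	    ...
+//	}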
+func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := packageFactKey{pkg, factType(ptr)} + if v, ok := act.packageFacts[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// exportPackageFact implements Pass.ExportPackageFact. +func (act *action) exportPackageFact(fact analysis.Fact) { + if act.pass.ExportPackageFact == nil { + log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact) + } + + key := packageFactKey{act.pass.Pkg, factType(fact)} + act.packageFacts[key] = fact // clobber any existing entry + if dbg('f') { + fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n", + act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact) + } +} + +func factType(fact analysis.Fact) reflect.Type { + t := reflect.TypeOf(fact) + if t.Kind() != reflect.Ptr { + log.Fatalf("invalid Fact type: got %T, want pointer", fact) + } + return t +} + +// allPackageFacts implements Pass.AllPackageFacts. +func (act *action) allPackageFacts() []analysis.PackageFact { + facts := make([]analysis.PackageFact, 0, len(act.packageFacts)) + for k := range act.packageFacts { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]}) + } + return facts +} + +func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go new file mode 100644 index 00000000000..3b121cb0ce7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspect defines an Analyzer that provides an AST inspector +// (golang.org/x/tools/go/ast/inspector.Inspector) for the syntax trees +// of a package. It is only a building block for other analyzers. +// +// Example of use in another analysis: +// +// import ( +// "golang.org/x/tools/go/analysis" +// "golang.org/x/tools/go/analysis/passes/inspect" +// "golang.org/x/tools/go/ast/inspector" +// ) +// +// var Analyzer = &analysis.Analyzer{ +// ... +// Requires: []*analysis.Analyzer{inspect.Analyzer}, +// } +// +// func run(pass *analysis.Pass) (interface{}, error) { +// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) +// inspect.Preorder(nil, func(n ast.Node) { +// ... +// }) +// return nil, nil +// } +package inspect + +import ( + "reflect" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/ast/inspector" +) + +var Analyzer = &analysis.Analyzer{ + Name: "inspect", + Doc: "optimize AST traversal for later passes", + URL: "https://pkg.go.dev/golang.org/x/tools/go/analysis/passes/inspect", + Run: run, + RunDespiteErrors: true, + ResultType: reflect.TypeOf(new(inspector.Inspector)), +} + +func run(pass *analysis.Pass) (interface{}, error) { + return inspector.New(pass.Files), nil +} diff --git a/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go b/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go new file mode 100644 index 00000000000..91044ca0858 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/singlechecker/singlechecker.go @@ -0,0 +1,76 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singlechecker defines the main function for an analysis +// driver with only a single analysis. +// This package makes it easy for a provider of an analysis package to +// also provide a standalone tool that runs just that analysis. +// +// For example, if example.org/findbadness is an analysis package, +// all that is needed to define a standalone tool is a file, +// example.org/findbadness/cmd/findbadness/main.go, containing: +// +// // The findbadness command runs an analysis. +// package main +// +// import ( +// "example.org/findbadness" +// "golang.org/x/tools/go/analysis/singlechecker" +// ) +// +// func main() { singlechecker.Main(findbadness.Analyzer) } +package singlechecker + +import ( + "flag" + "fmt" + "log" + "os" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/go/analysis/internal/checker" + "golang.org/x/tools/go/analysis/unitchecker" +) + +// Main is the main function for a checker command for a single analysis. +func Main(a *analysis.Analyzer) { + log.SetFlags(0) + log.SetPrefix(a.Name + ": ") + + analyzers := []*analysis.Analyzer{a} + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + checker.RegisterFlags() + + flag.Usage = func() { + paras := strings.Split(a.Doc, "\n\n") + fmt.Fprintf(os.Stderr, "%s: %s\n\n", a.Name, paras[0]) + fmt.Fprintf(os.Stderr, "Usage: %s [-flag] [package]\n\n", a.Name) + if len(paras) > 1 { + fmt.Fprintln(os.Stderr, strings.Join(paras[1:], "\n\n")) + } + fmt.Fprintln(os.Stderr, "\nFlags:") + flag.PrintDefaults() + } + + analyzers = analysisflags.Parse(analyzers, false) + + args := flag.Args() + if len(args) == 0 { + flag.Usage() + os.Exit(1) + } + + if len(args) == 1 && strings.HasSuffix(args[0], ".cfg") { + unitchecker.Run(args[0], analyzers) + panic("unreachable") + } + + os.Exit(checker.Run(args, analyzers)) +} diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go new file mode 100644 index 00000000000..71ebbfaef15 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -0,0 +1,452 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The unitchecker package defines the main function for an analysis +// driver that analyzes a single compilation unit during a build. +// It is invoked by a build system such as "go vet": +// +// $ go vet -vettool=$(which vet) +// +// It supports the following command-line protocol: +// +// -V=full describe executable (to the build tool) +// -flags describe flags (to the build tool) +// foo.cfg description of compilation unit (from the build tool) +// +// This package does not depend on go/packages. +// If you need a standalone tool, use multichecker, +// which supports this mode but can also load packages +// from source using go/packages. +package unitchecker + +// TODO(adonovan): +// - with gccgo, go build does not build standard library, +// so we will not get to analyze it. Yet we must in order +// to create base facts for, say, the fmt package for the +// printf checker. 
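+
+// For reference, the foo.cfg handed to the tool is JSON whose fields
+// mirror the Config struct defined below. A rough, abridged sample;
+// the concrete values and paths are illustrative only:
+//
+//	{
+//		"ID": "fmt",
+//		"Compiler": "gc",
+//		"ImportPath": "fmt",
+//		"GoFiles": ["/go/src/fmt/format.go", "/go/src/fmt/print.go"],
+//		"ImportMap": {"errors": "errors"},
+//		"PackageFile": {"errors": "$WORK/b002/_pkg_.a"},
+//		"PackageVetx": {"errors": "$WORK/b002/vet.out"},
+//		"VetxOutput": "$WORK/b003/vet.out"
+//	}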
+ +import ( + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "go/ast" + "go/build" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io" + "log" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/internal/analysisflags" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/facts" + "golang.org/x/tools/internal/versions" +) + +// A Config describes a compilation unit to be analyzed. +// It is provided to the tool in a JSON-encoded file +// whose name ends with ".cfg". +type Config struct { + ID string // e.g. "fmt [fmt.test]" + Compiler string // gc or gccgo, provided to MakeImporter + Dir string // (unused) + ImportPath string // package path + GoVersion string // minimum required Go version, such as "go1.21.0" + GoFiles []string + NonGoFiles []string + IgnoredFiles []string + ModulePath string // module path + ModuleVersion string // module version + ImportMap map[string]string // maps import path to package path + PackageFile map[string]string // maps package path to file of type information + Standard map[string]bool // package belongs to standard library + PackageVetx map[string]string // maps package path to file of fact information + VetxOnly bool // run analysis only for facts, not diagnostics + VetxOutput string // where to write file of fact information + SucceedOnTypecheckFailure bool +} + +// Main is the main function of a vet-like analysis tool that must be +// invoked by a build system to analyze a single package. +// +// The protocol required by 'go vet -vettool=...' is that the tool must support: +// +// -flags describe flags in JSON +// -V=full describe executable for build caching +// foo.cfg perform separate modular analyze on the single +// unit described by a JSON config file foo.cfg. +func Main(analyzers ...*analysis.Analyzer) { + progname := filepath.Base(os.Args[0]) + log.SetFlags(0) + log.SetPrefix(progname + ": ") + + if err := analysis.Validate(analyzers); err != nil { + log.Fatal(err) + } + + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `%[1]s is a tool for static analysis of Go programs. + +Usage of %[1]s: + %.16[1]s unit.cfg # execute analysis specified by config file + %.16[1]s help # general help, including listing analyzers and flags + %.16[1]s help name # help on specific analyzer and its flags +`, progname) + os.Exit(1) + } + + analyzers = analysisflags.Parse(analyzers, true) + + args := flag.Args() + if len(args) == 0 { + flag.Usage() + } + if args[0] == "help" { + analysisflags.Help(progname, analyzers, args[1:]) + os.Exit(0) + } + if len(args) != 1 || !strings.HasSuffix(args[0], ".cfg") { + log.Fatalf(`invoking "go tool vet" directly is unsupported; use "go vet"`) + } + Run(args[0], analyzers) +} + +// Run reads the *.cfg file, runs the analysis, +// and calls os.Exit with an appropriate error code. +// It assumes flags have already been set. +func Run(configFile string, analyzers []*analysis.Analyzer) { + cfg, err := readConfig(configFile) + if err != nil { + log.Fatal(err) + } + + fset := token.NewFileSet() + results, err := run(fset, cfg, analyzers) + if err != nil { + log.Fatal(err) + } + + // In VetxOnly mode, the analysis is run only for facts. 
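+ // Note that this plain-text path exits 1 when there are diagnostics,
+ // matching "go vet" convention; unlike the checker driver's
+ // printDiagnostics, it does not reserve a separate code 3 for them.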
+ if !cfg.VetxOnly { + if analysisflags.JSON { + // JSON output + tree := make(analysisflags.JSONTree) + for _, res := range results { + tree.Add(fset, cfg.ID, res.a.Name, res.diagnostics, res.err) + } + tree.Print() + } else { + // plain text + exit := 0 + for _, res := range results { + if res.err != nil { + log.Println(res.err) + exit = 1 + } + } + for _, res := range results { + for _, diag := range res.diagnostics { + analysisflags.PrintPlain(fset, diag) + exit = 1 + } + } + os.Exit(exit) + } + } + + os.Exit(0) +} + +func readConfig(filename string) (*Config, error) { + data, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + cfg := new(Config) + if err := json.Unmarshal(data, cfg); err != nil { + return nil, fmt.Errorf("cannot decode JSON config file %s: %v", filename, err) + } + if len(cfg.GoFiles) == 0 { + // The go command disallows packages with no files. + // The only exception is unsafe, but the go command + // doesn't call vet on it. + return nil, fmt.Errorf("package has no files: %s", cfg.ImportPath) + } + return cfg, nil +} + +type factImporter = func(pkgPath string) ([]byte, error) + +// These four hook variables are a proof of concept of a future +// parameterization of a unitchecker API that allows the client to +// determine how and where facts and types are produced and consumed. +// (Note that the eventual API will likely be quite different.) +// +// The defaults honor a Config in a manner compatible with 'go vet'. +var ( + makeTypesImporter = func(cfg *Config, fset *token.FileSet) types.Importer { + compilerImporter := importer.ForCompiler(fset, cfg.Compiler, func(path string) (io.ReadCloser, error) { + // path is a resolved package path, not an import path. + file, ok := cfg.PackageFile[path] + if !ok { + if cfg.Compiler == "gccgo" && cfg.Standard[path] { + return nil, nil // fall back to default gccgo lookup + } + return nil, fmt.Errorf("no package file for %q", path) + } + return os.Open(file) + }) + return importerFunc(func(importPath string) (*types.Package, error) { + path, ok := cfg.ImportMap[importPath] // resolve vendoring, etc + if !ok { + return nil, fmt.Errorf("can't resolve import %q", path) + } + return compilerImporter.Import(path) + }) + } + + exportTypes = func(*Config, *token.FileSet, *types.Package) error { + // By default this is a no-op, because "go vet" + // makes the compiler produce type information. + return nil + } + + makeFactImporter = func(cfg *Config) factImporter { + return func(pkgPath string) ([]byte, error) { + if vetx, ok := cfg.PackageVetx[pkgPath]; ok { + return os.ReadFile(vetx) + } + return nil, nil // no .vetx file, no facts + } + } + + exportFacts = func(cfg *Config, data []byte) error { + return os.WriteFile(cfg.VetxOutput, data, 0666) + } +) + +func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]result, error) { + // Load, parse, typecheck. + var files []*ast.File + for _, name := range cfg.GoFiles { + f, err := parser.ParseFile(fset, name, nil, parser.ParseComments) + if err != nil { + if cfg.SucceedOnTypecheckFailure { + // Silently succeed; let the compiler + // report parse errors. 
+ err = nil + } + return nil, err + } + files = append(files, f) + } + tc := &types.Config{ + Importer: makeTypesImporter(cfg, fset), + Sizes: types.SizesFor("gc", build.Default.GOARCH), // TODO(adonovan): use cfg.Compiler + GoVersion: cfg.GoVersion, + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + versions.InitFileVersions(info) + + pkg, err := tc.Check(cfg.ImportPath, fset, files, info) + if err != nil { + if cfg.SucceedOnTypecheckFailure { + // Silently succeed; let the compiler + // report type errors. + err = nil + } + return nil, err + } + + // Register fact types with gob. + // In VetxOnly mode, analyzers are only for their facts, + // so we can skip any analysis that neither produces facts + // nor depends on any analysis that produces facts. + // + // TODO(adonovan): fix: the command (and logic!) here are backwards. + // It should say "...nor is required by any...". (Issue 443099) + // + // Also build a map to hold working state and result. + type action struct { + once sync.Once + result interface{} + err error + usesFacts bool // (transitively uses) + diagnostics []analysis.Diagnostic + } + actions := make(map[*analysis.Analyzer]*action) + var registerFacts func(a *analysis.Analyzer) bool + registerFacts = func(a *analysis.Analyzer) bool { + act, ok := actions[a] + if !ok { + act = new(action) + var usesFacts bool + for _, f := range a.FactTypes { + usesFacts = true + gob.Register(f) + } + for _, req := range a.Requires { + if registerFacts(req) { + usesFacts = true + } + } + act.usesFacts = usesFacts + actions[a] = act + } + return act.usesFacts + } + var filtered []*analysis.Analyzer + for _, a := range analyzers { + if registerFacts(a) || !cfg.VetxOnly { + filtered = append(filtered, a) + } + } + analyzers = filtered + + // Read facts from imported packages. + facts, err := facts.NewDecoder(pkg).Decode(makeFactImporter(cfg)) + if err != nil { + return nil, err + } + + // In parallel, execute the DAG of analyzers. + var exec func(a *analysis.Analyzer) *action + var execAll func(analyzers []*analysis.Analyzer) + exec = func(a *analysis.Analyzer) *action { + act := actions[a] + act.once.Do(func() { + execAll(a.Requires) // prefetch dependencies in parallel + + // The inputs to this analysis are the + // results of its prerequisites. + inputs := make(map[*analysis.Analyzer]interface{}) + var failed []string + for _, req := range a.Requires { + reqact := exec(req) + if reqact.err != nil { + failed = append(failed, req.String()) + continue + } + inputs[req] = reqact.result + } + + // Report an error if any dependency failed. 
+ if failed != nil { + sort.Strings(failed) + act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + return + } + + factFilter := make(map[reflect.Type]bool) + for _, f := range a.FactTypes { + factFilter[reflect.TypeOf(f)] = true + } + + module := &analysis.Module{ + Path: cfg.ModulePath, + Version: cfg.ModuleVersion, + GoVersion: cfg.GoVersion, + } + + pass := &analysis.Pass{ + Analyzer: a, + Fset: fset, + Files: files, + OtherFiles: cfg.NonGoFiles, + IgnoredFiles: cfg.IgnoredFiles, + Pkg: pkg, + TypesInfo: info, + TypesSizes: tc.Sizes, + TypeErrors: nil, // unitchecker doesn't RunDespiteErrors + ResultOf: inputs, + Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, + ImportObjectFact: facts.ImportObjectFact, + ExportObjectFact: facts.ExportObjectFact, + AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) }, + ImportPackageFact: facts.ImportPackageFact, + ExportPackageFact: facts.ExportPackageFact, + AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, + Module: module, + } + pass.ReadFile = analysisinternal.MakeReadFile(pass) + + t0 := time.Now() + act.result, act.err = a.Run(pass) + + if act.err == nil { // resolve URLs on diagnostics. + for i := range act.diagnostics { + if url, uerr := analysisflags.ResolveURL(a, act.diagnostics[i]); uerr == nil { + act.diagnostics[i].URL = url + } else { + act.err = uerr // keep the last error + } + } + } + if false { + log.Printf("analysis %s = %s", pass, time.Since(t0)) + } + }) + return act + } + execAll = func(analyzers []*analysis.Analyzer) { + var wg sync.WaitGroup + for _, a := range analyzers { + wg.Add(1) + go func(a *analysis.Analyzer) { + _ = exec(a) + wg.Done() + }(a) + } + wg.Wait() + } + + execAll(analyzers) + + // Return diagnostics and errors from root analyzers. + results := make([]result, len(analyzers)) + for i, a := range analyzers { + act := actions[a] + results[i].a = a + results[i].err = act.err + results[i].diagnostics = act.diagnostics + } + + data := facts.Encode() + if err := exportFacts(cfg, data); err != nil { + return nil, fmt.Errorf("failed to export analysis facts: %v", err) + } + if err := exportTypes(cfg, fset, pkg); err != nil { + return nil, fmt.Errorf("failed to export type information: %v", err) + } + + return results, nil +} + +type result struct { + a *analysis.Analyzer + diagnostics []analysis.Diagnostic + err error +} + +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go new file mode 100644 index 00000000000..4f2c4045622 --- /dev/null +++ b/vendor/golang.org/x/tools/go/analysis/validate.go @@ -0,0 +1,137 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysis + +import ( + "fmt" + "reflect" + "strings" + "unicode" +) + +// Validate reports an error if any of the analyzers are misconfigured. +// Checks include: +// that the name is a valid identifier; +// that the Doc is not empty; +// that the Run is non-nil; +// that the Requires graph is acyclic; +// that analyzer fact types are unique; +// that each fact type is a pointer. +// +// Analyzer names need not be unique, though this may be confusing. 
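+//
+// A hedged example of an input Validate rejects, as seen from a client
+// package:
+//
+//	bad := &analysis.Analyzer{Name: "bad", Run: run} // Doc is empty
+//	err := analysis.Validate([]*analysis.Analyzer{bad})
+//	// err: analyzer "bad" is undocumented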
+func Validate(analyzers []*Analyzer) error { + // Map each fact type to its sole generating analyzer. + factTypes := make(map[reflect.Type]*Analyzer) + + // Traverse the Requires graph, depth first. + const ( + white = iota + grey + black + finished + ) + color := make(map[*Analyzer]uint8) + var visit func(a *Analyzer) error + visit = func(a *Analyzer) error { + if a == nil { + return fmt.Errorf("nil *Analyzer") + } + if color[a] == white { + color[a] = grey + + // names + if !validIdent(a.Name) { + return fmt.Errorf("invalid analyzer name %q", a) + } + + if a.Doc == "" { + return fmt.Errorf("analyzer %q is undocumented", a) + } + + if a.Run == nil { + return fmt.Errorf("analyzer %q has nil Run", a) + } + // fact types + for _, f := range a.FactTypes { + if f == nil { + return fmt.Errorf("analyzer %s has nil FactType", a) + } + t := reflect.TypeOf(f) + if prev := factTypes[t]; prev != nil { + return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", + t, a, prev) + } + if t.Kind() != reflect.Ptr { + return fmt.Errorf("%s: fact type %s is not a pointer", a, t) + } + factTypes[t] = a + } + + // recursion + for _, req := range a.Requires { + if err := visit(req); err != nil { + return err + } + } + color[a] = black + } + + if color[a] == grey { + stack := []*Analyzer{a} + inCycle := map[string]bool{} + for len(stack) > 0 { + current := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if color[current] == grey && !inCycle[current.Name] { + inCycle[current.Name] = true + stack = append(stack, current.Requires...) + } + } + return &CycleInRequiresGraphError{AnalyzerNames: inCycle} + } + + return nil + } + for _, a := range analyzers { + if err := visit(a); err != nil { + return err + } + } + + // Reject duplicates among analyzers. + // Precondition: color[a] == black. + // Postcondition: color[a] == finished. + for _, a := range analyzers { + if color[a] == finished { + return fmt.Errorf("duplicate analyzer: %s", a.Name) + } + color[a] = finished + } + + return nil +} + +func validIdent(name string) bool { + for i, r := range name { + if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) { + return false + } + } + return name != "" +} + +type CycleInRequiresGraphError struct { + AnalyzerNames map[string]bool +} + +func (e *CycleInRequiresGraphError) Error() string { + var b strings.Builder + b.WriteString("cycle detected involving the following analyzers:") + for n := range e.AnalyzerNames { + b.WriteByte(' ') + b.WriteString(n) + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go new file mode 100644 index 00000000000..e0b13e70a01 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -0,0 +1,513 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal provides gopls' internal analyses with a +// number of helper functions that operate on typed syntax trees. +package analysisinternal + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + pathpkg "path" + "strconv" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/aliases" +) + +func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { + // Get the end position for the type error. 
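+ // For example, with src = []byte("foo(bar)") and start at offset 4,
+ // IndexAny below finds ")" three bytes on, so the returned end is
+ // start+3 and the error span covers just "bar".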
+ offset, end := fset.PositionFor(start, false).Offset, start + if offset >= len(src) { + return end + } + if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { + end = start + token.Pos(width) + } + return end +} + +func ZeroValue(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + // TODO(adonovan): think about generics, and also generic aliases. + under := aliases.Unalias(typ) + // Don't call Underlying unconditionally: although it removes + // Named and Alias, it also removes TypeParam. + if n, ok := under.(*types.Named); ok { + under = n.Underlying() + } + switch under := under.(type) { + case *types.Basic: + switch { + case under.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case under.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case under.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + default: + panic(fmt.Sprintf("unknown basic type %v", under)) + } + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice, *types.Array: + return ast.NewIdent("nil") + case *types.Struct: + texpr := TypeExpr(f, pkg, typ) // typ because we want the name here. + if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: texpr, + } + } + return nil +} + +// IsZeroValue checks whether the given expression is a 'zero value' (as determined by output of +// analysisinternal.ZeroValue) +func IsZeroValue(expr ast.Expr) bool { + switch e := expr.(type) { + case *ast.BasicLit: + return e.Value == "0" || e.Value == `""` + case *ast.Ident: + return e.Name == "nil" || e.Name == "false" + default: + return false + } +} + +// TypeExpr returns syntax for the specified type. References to +// named types from packages other than pkg are qualified by an appropriate +// package name, as defined by the import environment of file. 
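+//
+// For example, a variable of type time.Duration in a file that imports
+// "time" under its default name is rendered as the selector expression
+// time.Duration, while inside package time itself it is rendered as the
+// bare identifier Duration.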
+func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + case *types.Pointer: + x := TypeExpr(f, pkg, t.Elem()) + if x == nil { + return nil + } + return &ast.UnaryExpr{ + Op: token.MUL, + X: x, + } + case *types.Array: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Len: &ast.BasicLit{ + Kind: token.INT, + Value: fmt.Sprintf("%d", t.Len()), + }, + Elt: elt, + } + case *types.Slice: + elt := TypeExpr(f, pkg, t.Elem()) + if elt == nil { + return nil + } + return &ast.ArrayType{ + Elt: elt, + } + case *types.Map: + key := TypeExpr(f, pkg, t.Key()) + value := TypeExpr(f, pkg, t.Elem()) + if key == nil || value == nil { + return nil + } + return &ast.MapType{ + Key: key, + Value: value, + } + case *types.Chan: + dir := ast.ChanDir(t.Dir()) + if t.Dir() == types.SendRecv { + dir = ast.SEND | ast.RECV + } + value := TypeExpr(f, pkg, t.Elem()) + if value == nil { + return nil + } + return &ast.ChanType{ + Dir: dir, + Value: value, + } + case *types.Signature: + var params []*ast.Field + for i := 0; i < t.Params().Len(); i++ { + p := TypeExpr(f, pkg, t.Params().At(i).Type()) + if p == nil { + return nil + } + params = append(params, &ast.Field{ + Type: p, + Names: []*ast.Ident{ + { + Name: t.Params().At(i).Name(), + }, + }, + }) + } + if t.Variadic() { + last := params[len(params)-1] + last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt} + } + var returns []*ast.Field + for i := 0; i < t.Results().Len(); i++ { + r := TypeExpr(f, pkg, t.Results().At(i).Type()) + if r == nil { + return nil + } + returns = append(returns, &ast.Field{ + Type: r, + }) + } + return &ast.FuncType{ + Params: &ast.FieldList{ + List: params, + }, + Results: &ast.FieldList{ + List: returns, + }, + } + case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} + if t.Obj().Pkg() == nil { + return ast.NewIdent(t.Obj().Name()) + } + if t.Obj().Pkg() == pkg { + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + + // If the file already imports the package under another name, use that. + for _, cand := range f.Imports { + if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + case *types.Struct: + return ast.NewIdent(t.String()) + case *types.Interface: + return ast.NewIdent(t.String()) + default: + return nil + } +} + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. 
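+//
+// Case clause example:
+//	switch z {
+//	case 1:
+//		y := x
+//	}
+// If x is undeclared, this function would return the enclosing switch
+// statement, since a new variable cannot be inserted between `switch z {`
+// and `case 1:`.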
+func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. + return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + case *ast.SwitchStmt, *ast.TypeSwitchStmt: + return expr.(ast.Stmt) + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} + +// WalkASTWithParent walks the AST rooted at n. The semantics are +// similar to ast.Inspect except it does not call f(nil). +func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) { + var ancestors []ast.Node + ast.Inspect(n, func(n ast.Node) (recurse bool) { + if n == nil { + ancestors = ancestors[:len(ancestors)-1] + return false + } + + var parent ast.Node + if len(ancestors) > 0 { + parent = ancestors[len(ancestors)-1] + } + ancestors = append(ancestors, n) + return f(n, parent) + }) +} + +// MatchingIdents finds the names of all identifiers in 'node' that match any of the given types. +// 'pos' represents the position at which the identifiers may be inserted. 'pos' must be within +// the scope of each of identifier we select. Otherwise, we will insert a variable at 'pos' that +// is unrecognized. +func MatchingIdents(typs []types.Type, node ast.Node, pos token.Pos, info *types.Info, pkg *types.Package) map[types.Type][]string { + + // Initialize matches to contain the variable types we are searching for. + matches := make(map[types.Type][]string) + for _, typ := range typs { + if typ == nil { + continue // TODO(adonovan): is this reachable? + } + matches[typ] = nil // create entry + } + + seen := map[types.Object]struct{}{} + ast.Inspect(node, func(n ast.Node) bool { + if n == nil { + return false + } + // Prevent circular definitions. If 'pos' is within an assignment statement, do not + // allow any identifiers in that assignment statement to be selected. 
Otherwise, + // we could do the following, where 'x' satisfies the type of 'f0': + // + // x := fakeStruct{f0: x} + // + if assign, ok := n.(*ast.AssignStmt); ok && pos > assign.Pos() && pos <= assign.End() { + return false + } + if n.End() > pos { + return n.Pos() <= pos + } + ident, ok := n.(*ast.Ident) + if !ok || ident.Name == "_" { + return true + } + obj := info.Defs[ident] + if obj == nil || obj.Type() == nil { + return true + } + if _, ok := obj.(*types.TypeName); ok { + return true + } + // Prevent duplicates in matches' values. + if _, ok = seen[obj]; ok { + return true + } + seen[obj] = struct{}{} + // Find the scope for the given position. Then, check whether the object + // exists within the scope. + innerScope := pkg.Scope().Innermost(pos) + if innerScope == nil { + return true + } + _, foundObj := innerScope.LookupParent(ident.Name, pos) + if foundObj != obj { + return true + } + // The object must match one of the types that we are searching for. + // TODO(adonovan): opt: use typeutil.Map? + if names, ok := matches[obj.Type()]; ok { + matches[obj.Type()] = append(names, ident.Name) + } else { + // If the object type does not exactly match + // any of the target types, greedily find the first + // target type that the object type can satisfy. + for typ := range matches { + if equivalentTypes(obj.Type(), typ) { + matches[typ] = append(matches[typ], ident.Name) + } + } + } + return true + }) + return matches +} + +func equivalentTypes(want, got types.Type) bool { + if types.Identical(want, got) { + return true + } + // Code segment to help check for untyped equality from (golang/go#32146). + if rhs, ok := want.(*types.Basic); ok && rhs.Info()&types.IsUntyped > 0 { + if lhs, ok := got.Underlying().(*types.Basic); ok { + return rhs.Info()&types.IsConstType == lhs.Info()&types.IsConstType + } + } + return types.AssignableTo(want, got) +} + +// MakeReadFile returns a simple implementation of the Pass.ReadFile function. +func MakeReadFile(pass *analysis.Pass) func(filename string) ([]byte, error) { + return func(filename string) ([]byte, error) { + if err := CheckReadable(pass, filename); err != nil { + return nil, err + } + return os.ReadFile(filename) + } +} + +// CheckReadable enforces the access policy defined by the ReadFile field of [analysis.Pass]. +func CheckReadable(pass *analysis.Pass, filename string) error { + if slicesContains(pass.OtherFiles, filename) || + slicesContains(pass.IgnoredFiles, filename) { + return nil + } + for _, f := range pass.Files { + // TODO(adonovan): use go1.20 f.FileStart + if pass.Fset.File(f.Pos()).Name() == filename { + return nil + } + } + return fmt.Errorf("Pass.ReadFile: %s is not among OtherFiles, IgnoredFiles, or names of Files", filename) +} + +// TODO(adonovan): use go1.21 slices.Contains. +func slicesContains[S ~[]E, E comparable](slice S, x E) bool { + for _, elem := range slice { + if elem == x { + return true + } + } + return false +} + +// AddImport checks whether this file already imports pkgpath and +// that import is in scope at pos. If so, it returns the name under +// which it was imported and a zero edit. Otherwise, it adds a new +// import of pkgpath, using a name derived from the preferred name, +// and returns the chosen name along with the edit for the new import. +// +// It does not mutate its arguments. +func AddImport(info *types.Info, file *ast.File, pos token.Pos, pkgpath, preferredName string) (name string, newImport []analysis.TextEdit) { + // Find innermost enclosing lexical block. 
+ scope := info.Scopes[file].Innermost(pos) + if scope == nil { + panic("no enclosing lexical block") + } + + // Is there an existing import of this package? + // If so, are we in its scope? (not shadowed) + for _, spec := range file.Imports { + pkgname, ok := importedPkgName(info, spec) + if ok && pkgname.Imported().Path() == pkgpath { + if _, obj := scope.LookupParent(pkgname.Name(), pos); obj == pkgname { + return pkgname.Name(), nil + } + } + } + + // We must add a new import. + // Ensure we have a fresh name. + newName := preferredName + for i := 0; ; i++ { + if _, obj := scope.LookupParent(newName, pos); obj == nil { + break // fresh + } + newName = fmt.Sprintf("%s%d", preferredName, i) + } + + // For now, keep it real simple: create a new import + // declaration before the first existing declaration (which + // must exist), including its comments, and let goimports tidy it up. + // + // Use a renaming import whenever the preferred name is not + // available, or the chosen name does not match the last + // segment of its path. + newText := fmt.Sprintf("import %q\n\n", pkgpath) + if newName != preferredName || newName != pathpkg.Base(pkgpath) { + newText = fmt.Sprintf("import %s %q\n\n", newName, pkgpath) + } + decl0 := file.Decls[0] + var before ast.Node = decl0 + switch decl0 := decl0.(type) { + case *ast.GenDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + case *ast.FuncDecl: + if decl0.Doc != nil { + before = decl0.Doc + } + } + return newName, []analysis.TextEdit{{ + Pos: before.Pos(), + End: before.Pos(), + NewText: []byte(newText), + }} +} + +// importedPkgName returns the PkgName object declared by an ImportSpec. +// TODO(adonovan): use go1.22's Info.PkgNameOf. +func importedPkgName(info *types.Info, imp *ast.ImportSpec) (*types.PkgName, bool) { + var obj types.Object + if imp.Name != nil { + obj = info.Defs[imp.Name] + } else { + obj = info.Implicits[imp] + } + pkgname, ok := obj.(*types.PkgName) + return pkgname, ok +} diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go new file mode 100644 index 00000000000..39507723d3d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/extractdoc.go @@ -0,0 +1,113 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package analysisinternal + +import ( + "fmt" + "go/parser" + "go/token" + "strings" +) + +// MustExtractDoc is like [ExtractDoc] but it panics on error. +// +// To use, define a doc.go file such as: +// +// // Package halting defines an analyzer of program termination. +// // +// // # Analyzer halting +// // +// // halting: reports whether execution will halt. +// // +// // The halting analyzer reports a diagnostic for functions +// // that run forever. To suppress the diagnostics, try inserting +// // a 'break' statement into each loop. +// package halting +// +// import _ "embed" +// +// //go:embed doc.go +// var doc string +// +// And declare your analyzer as: +// +// var Analyzer = &analysis.Analyzer{ +// Name: "halting", +// Doc: analysisutil.MustExtractDoc(doc, "halting"), +// ... +// } +func MustExtractDoc(content, name string) string { + doc, err := ExtractDoc(content, name) + if err != nil { + panic(err) + } + return doc +} + +// ExtractDoc extracts a section of a package doc comment from the +// provided contents of an analyzer package's doc.go file. 
+//
+// A section is a portion of the comment between one heading and
+// the next, using this form:
+//
+// # Analyzer NAME
+//
+// NAME: SUMMARY
+//
+// Full description...
+//
+// where NAME matches the name argument, and SUMMARY is a brief
+// verb-phrase that describes the analyzer. The following lines, up
+// until the next heading or the end of the comment, contain the full
+// description. ExtractDoc returns the portion following the colon,
+// which is the form expected by Analyzer.Doc.
+//
+// Example:
+//
+// # Analyzer printf
+//
+// printf: checks consistency of calls to printf
+//
+// The printf analyzer checks consistency of calls to printf.
+// Here is the complete description...
+//
+// This notation allows a single doc comment to provide documentation
+// for multiple analyzers, each in its own section.
+// The HTML anchors generated for each heading are predictable.
+//
+// It returns an error if the content was not a valid Go source file
+// containing a package doc comment with a heading of the required
+// form.
+//
+// This machinery enables the package documentation (typically
+// accessible via the web at https://pkg.go.dev/) and the command
+// documentation (typically printed to a terminal) to be derived from
+// the same source and formatted appropriately.
+func ExtractDoc(content, name string) (string, error) {
+ if content == "" {
+ return "", fmt.Errorf("empty Go source file")
+ }
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", content, parser.ParseComments|parser.PackageClauseOnly)
+ if err != nil {
+ return "", fmt.Errorf("not a Go source file")
+ }
+ if f.Doc == nil {
+ return "", fmt.Errorf("Go source file has no package doc comment")
+ }
+ for _, section := range strings.Split(f.Doc.Text(), "\n# ") {
+ if body := strings.TrimPrefix(section, "Analyzer "+name); body != section &&
+ body != "" &&
+ (body[0] == '\r' || body[0] == '\n') {
+ body = strings.TrimSpace(body)
+ rest := strings.TrimPrefix(body, name+":")
+ if rest == body {
+ return "", fmt.Errorf("'Analyzer %s' heading not followed by '%s: summary...' line", name, name)
+ }
+ return strings.TrimSpace(rest), nil
+ }
+ }
+ return "", fmt.Errorf("package doc comment contains no 'Analyzer %s' heading", name)
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/diff.go b/vendor/golang.org/x/tools/internal/diff/diff.go
new file mode 100644
index 00000000000..a13547b7a7e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/diff.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff computes differences between text files or strings.
+package diff
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// An Edit describes the replacement of a portion of a text file.
+type Edit struct {
+ Start, End int // byte offsets of the region to replace
+ New string // the replacement
+}
+
+func (e Edit) String() string {
+ return fmt.Sprintf("{Start:%d,End:%d,New:%q}", e.Start, e.End, e.New)
+}
+
+// Apply applies a sequence of edits to the src buffer and returns the
+// result. Edits are applied in order of start offset; edits with the
+// same start offset are applied in the order they were provided.
+//
+// Apply returns an error if any edit is out of bounds,
+// or if any pair of edits is overlapping.
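+//
+// For example:
+//
+//	Apply("abcdef", []Edit{{Start: 1, End: 3, New: "X"}})
+//
+// returns "aXdef", replacing the two bytes "bc" with "X".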
+func Apply(src string, edits []Edit) (string, error) { + edits, size, err := validate(src, edits) + if err != nil { + return "", err + } + + // Apply edits. + out := make([]byte, 0, size) + lastEnd := 0 + for _, edit := range edits { + if lastEnd < edit.Start { + out = append(out, src[lastEnd:edit.Start]...) + } + out = append(out, edit.New...) + lastEnd = edit.End + } + out = append(out, src[lastEnd:]...) + + if len(out) != size { + panic("wrong size") + } + + return string(out), nil +} + +// ApplyBytes is like Apply, but it accepts a byte slice. +// The result is always a new array. +func ApplyBytes(src []byte, edits []Edit) ([]byte, error) { + res, err := Apply(string(src), edits) + return []byte(res), err +} + +// validate checks that edits are consistent with src, +// and returns the size of the patched output. +// It may return a different slice. +func validate(src string, edits []Edit) ([]Edit, int, error) { + if !sort.IsSorted(editsSort(edits)) { + edits = append([]Edit(nil), edits...) + SortEdits(edits) + } + + // Check validity of edits and compute final size. + size := len(src) + lastEnd := 0 + for _, edit := range edits { + if !(0 <= edit.Start && edit.Start <= edit.End && edit.End <= len(src)) { + return nil, 0, fmt.Errorf("diff has out-of-bounds edits") + } + if edit.Start < lastEnd { + return nil, 0, fmt.Errorf("diff has overlapping edits") + } + size += len(edit.New) + edit.Start - edit.End + lastEnd = edit.End + } + + return edits, size, nil +} + +// SortEdits orders a slice of Edits by (start, end) offset. +// This ordering puts insertions (end = start) before deletions +// (end > start) at the same point, but uses a stable sort to preserve +// the order of multiple insertions at the same point. +// (Apply detects multiple deletions at the same point as an error.) +func SortEdits(edits []Edit) { + sort.Stable(editsSort(edits)) +} + +type editsSort []Edit + +func (a editsSort) Len() int { return len(a) } +func (a editsSort) Less(i, j int) bool { + if cmp := a[i].Start - a[j].Start; cmp != 0 { + return cmp < 0 + } + return a[i].End < a[j].End +} +func (a editsSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// lineEdits expands and merges a sequence of edits so that each +// resulting edit replaces one or more complete lines. +// See ApplyEdits for preconditions. +func lineEdits(src string, edits []Edit) ([]Edit, error) { + edits, _, err := validate(src, edits) + if err != nil { + return nil, err + } + + // Do all deletions begin and end at the start of a line, + // and all insertions end with a newline? + // (This is merely a fast path.) + for _, edit := range edits { + if edit.Start >= len(src) || // insertion at EOF + edit.Start > 0 && src[edit.Start-1] != '\n' || // not at line start + edit.End > 0 && src[edit.End-1] != '\n' || // not at line start + edit.New != "" && edit.New[len(edit.New)-1] != '\n' { // partial insert + goto expand // slow path + } + } + return edits, nil // aligned + +expand: + if len(edits) == 0 { + return edits, nil // no edits (unreachable due to fast path) + } + expanded := make([]Edit, 0, len(edits)) // a guess + prev := edits[0] + // TODO(adonovan): opt: start from the first misaligned edit. + // TODO(adonovan): opt: avoid quadratic cost of string += string. + for _, edit := range edits[1:] { + between := src[prev.End:edit.Start] + if !strings.Contains(between, "\n") { + // overlapping lines: combine with previous edit. + prev.New += between + edit.New + prev.End = edit.End + } else { + // non-overlapping lines: flush previous edit. 
+ expanded = append(expanded, expandEdit(prev, src))
+ prev = edit
+ }
+ }
+ return append(expanded, expandEdit(prev, src)), nil // flush final edit
+}
+
+// expandEdit returns edit expanded to complete whole lines.
+func expandEdit(edit Edit, src string) Edit {
+ // Expand start left to start of line.
+ // (delta is the zero-based column number of start.)
+ start := edit.Start
+ if delta := start - 1 - strings.LastIndex(src[:start], "\n"); delta > 0 {
+ edit.Start -= delta
+ edit.New = src[start-delta:start] + edit.New
+ }
+
+ // Expand end right to end of line.
+ end := edit.End
+ if end > 0 && src[end-1] != '\n' ||
+ edit.New != "" && edit.New[len(edit.New)-1] != '\n' {
+ if nl := strings.IndexByte(src[end:], '\n'); nl < 0 {
+ edit.End = len(src) // extend to EOF
+ } else {
+ edit.End = end + nl + 1 // extend beyond \n
+ }
+ }
+ edit.New += src[end:edit.End]
+
+ return edit
+}
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/common.go b/vendor/golang.org/x/tools/internal/diff/lcs/common.go
new file mode 100644
index 00000000000..c3e82dd2683
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/common.go
@@ -0,0 +1,179 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package lcs
+
+import (
+ "log"
+ "sort"
+)
+
+// lcs is a longest common sequence
+type lcs []diag
+
+// A diag is a piece of the edit graph where A[X+i] == B[Y+i], for 0<=i<Len.
+// All computed diagonals are parts of a longest common subsequence.
+type diag struct {
+ X, Y int
+ Len int
+}
+
+// sort sorts in place, by lowest X,
+// and if tied, inversely by Len
+func (l lcs) sort() lcs {
+ sort.Slice(l, func(i, j int) bool {
+ if l[i].X != l[j].X {
+ return l[i].X < l[j].X
+ }
+ return l[i].Len > l[j].Len
+ })
+ return l
+}
+
+// validate that the elements of the lcs do not overlap
+// (can only happen when the two-sided algorithm ends early)
+// expects the lcs to be sorted
+func (l lcs) valid() bool {
+ for i := 1; i < len(l); i++ {
+ if l[i-1].X+l[i-1].Len > l[i].X {
+ return false
+ }
+ if l[i-1].Y+l[i-1].Len > l[i].Y {
+ return false
+ }
+ }
+ return true
+}
+
+// repair overlapping lcs
+// only called if two-sided stops early
+func (l lcs) fix() lcs {
+ // from the set of diagonals in l, find a maximal non-conflicting set
+ // this problem may be NP-complete, but we use a greedy heuristic,
+ // which is quadratic, but with a better data structure, could be D log D.
+ // independent is not enough: {0,3,1} and {3,0,2} can't both occur in an lcs
+ // which has to have monotone x and y
+ if len(l) == 0 {
+ return nil
+ }
+ sort.Slice(l, func(i, j int) bool { return l[i].Len > l[j].Len })
+ tmp := make(lcs, 0, len(l))
+ tmp = append(tmp, l[0])
+ for i := 1; i < len(l); i++ {
+ var dir direction
+ nxt := l[i]
+ for _, in := range tmp {
+ if dir, nxt = overlap(in, nxt); dir == empty || dir == bad {
+ break
+ }
+ }
+ if nxt.Len > 0 && dir != bad {
+ tmp = append(tmp, nxt)
+ }
+ }
+ tmp.sort()
+ if false && !tmp.valid() { // debug checking
+ log.Fatalf("here %d", len(tmp))
+ }
+ return tmp
+}
+
+type direction int
+
+const (
+ empty direction = iota // diag is empty (so not in lcs)
+ leftdown // proposed acceptably to the left and below
+ rightup // proposed diag is acceptably to the right and above
+ bad // proposed diag is inconsistent with the lcs so far
+)
+
+// overlap trims the proposed diag prop so it doesn't overlap with
+// the existing diag that has already been added to the lcs.
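+// For example, given exist = diag{X: 0, Y: 0, Len: 3} and
+// prop = diag{X: 2, Y: 2, Len: 3}, the overlapping first element of
+// prop is trimmed away, yielding diag{X: 3, Y: 3, Len: 2}, which is
+// then classified rightup.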
+func overlap(exist, prop diag) (direction, diag) { + if prop.X <= exist.X && exist.X < prop.X+prop.Len { + // remove the end of prop where it overlaps with the X end of exist + delta := prop.X + prop.Len - exist.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.X <= prop.X && prop.X < exist.X+exist.Len { + // remove the beginning of prop where overlaps with exist + delta := exist.X + exist.Len - prop.X + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta + prop.Y += delta + } + if prop.Y <= exist.Y && exist.Y < prop.Y+prop.Len { + // remove the end of prop that overlaps (in Y) with exist + delta := prop.Y + prop.Len - exist.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + } + if exist.Y <= prop.Y && prop.Y < exist.Y+exist.Len { + // remove the beginning of peop that overlaps with exist + delta := exist.Y + exist.Len - prop.Y + prop.Len -= delta + if prop.Len <= 0 { + return empty, prop + } + prop.X += delta // no test reaches this code + prop.Y += delta + } + if prop.X+prop.Len <= exist.X && prop.Y+prop.Len <= exist.Y { + return leftdown, prop + } + if exist.X+exist.Len <= prop.X && exist.Y+exist.Len <= prop.Y { + return rightup, prop + } + // prop can't be in an lcs that contains exist + return bad, prop +} + +// manipulating Diag and lcs + +// prepend a diagonal (x,y)-(x+1,y+1) segment either to an empty lcs +// or to its first Diag. prepend is only called to extend diagonals +// the backward direction. +func (lcs lcs) prepend(x, y int) lcs { + if len(lcs) > 0 { + d := &lcs[0] + if int(d.X) == x+1 && int(d.Y) == y+1 { + // extend the diagonal down and to the left + d.X, d.Y = int(x), int(y) + d.Len++ + return lcs + } + } + + r := diag{X: int(x), Y: int(y), Len: 1} + lcs = append([]diag{r}, lcs...) + return lcs +} + +// append appends a diagonal, or extends the existing one. +// by adding the edge (x,y)-(x+1.y+1). append is only called +// to extend diagonals in the forward direction. +func (lcs lcs) append(x, y int) lcs { + if len(lcs) > 0 { + last := &lcs[len(lcs)-1] + // Expand last element if adjoining. + if last.X+last.Len == x && last.Y+last.Len == y { + last.Len++ + return lcs + } + } + + return append(lcs, diag{X: x, Y: y, Len: 1}) +} + +// enforce constraint on d, k +func ok(d, k int) bool { + return d >= 0 && -d <= k && k <= d +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/doc.go b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go new file mode 100644 index 00000000000..9029dd20b3d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/doc.go @@ -0,0 +1,156 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// package lcs contains code to find longest-common-subsequences +// (and diffs) +package lcs + +/* +Compute longest-common-subsequences of two slices A, B using +algorithms from Myers' paper. A longest-common-subsequence +(LCS from now on) of A and B is a maximal set of lexically increasing +pairs of subscripts (x,y) with A[x]==B[y]. There may be many LCS, but +they all have the same length. An LCS determines a sequence of edits +that changes A into B. + +The key concept is the edit graph of A and B. +If A has length N and B has length M, then the edit graph has +vertices v[i][j] for 0 <= i <= N, 0 <= j <= M. There is a +horizontal edge from v[i][j] to v[i+1][j] whenever both are in +the graph, and a vertical edge from v[i][j] to f[i][j+1] similarly. 
+When A[i] == B[j] there is a diagonal edge from v[i][j] to v[i+1][j+1].
+
+A path in the graph between (0,0) and (N,M) determines a sequence
+of edits converting A into B: each horizontal edge corresponds to removing
+an element of A, and each vertical edge corresponds to inserting an
+element of B.
+
+A vertex (x,y) is on (forward) diagonal k if x-y=k. A path in the graph
+is of length D if it has D non-diagonal edges. The algorithms generate
+forward paths (in which at least one of x,y increases at each edge),
+or backward paths (in which at least one of x,y decreases at each edge),
+or a combination. (Note that the orientation is the traditional mathematical one,
+with the origin in the lower-left corner.)
+
+Here is the edit graph for A:"aabbaa", B:"aacaba". (I know the diagonals look weird.)
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ b | | | ___/‾‾‾ | ___/‾‾‾ | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ c | | | | | | |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+ a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ |
+ ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙
+   a         a         b         b         a         a
+
+
+The algorithm labels a vertex (x,y) with D,k if it is on diagonal k and at
+the end of a maximal path of length D. (Because x-y=k it suffices to remember
+only the x coordinate of the vertex.)
+
+The forward algorithm: Find the longest diagonal starting at (0,0) and
+label its end with D=0,k=0. From that vertex take a vertical step and
+then follow the longest diagonal (up and to the right), and label that vertex
+with D=1,k=-1. From the D=0,k=0 point take a horizontal step and then follow
+the longest diagonal (up and to the right) and label that vertex
+D=1,k=1. In the same way, having labelled all the D vertices,
+from a vertex labelled D,k find two vertices
+tentatively labelled D+1,k-1 and D+1,k+1. There may be two on the same
+diagonal, in which case take the one with the larger x.
+
+Eventually the path gets to (N,M), and the diagonals on it are the LCS.
+
+Here is the edit graph with the ends of D-paths labelled. (So, for instance,
+0/2,2 indicates that x=2,y=2 is labelled with 0, as it should be, since the first
+step is to go up the longest diagonal from (0,0).)
+A:"aabbaa", B:"aacaba" + ⊙ ------- ⊙ ------- ⊙ -------(3/3,6)------- ⊙ -------(3/5,6)-------(4/6,6) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ -------(2/3,5)------- ⊙ ------- ⊙ ------- ⊙ + b | | | ___/‾‾‾ | ___/‾‾‾ | | | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ -------(3/5,4)------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ -------(1/2,3)-------(2/3,3)------- ⊙ ------- ⊙ ------- ⊙ + c | | | | | | | + ⊙ ------- ⊙ -------(0/2,2)-------(1/3,2)-------(2/4,2)-------(3/5,2)-------(4/6,2) + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a | ___/‾‾‾ | ___/‾‾‾ | | | ___/‾‾‾ | ___/‾‾‾ | + ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ ------- ⊙ + a a b b a a + +The 4-path is reconstructed starting at (4/6,6), horizontal to (3/5,6), diagonal to (3,4), vertical +to (2/3,3), horizontal to (1/2,3), vertical to (0/2,2), and diagonal to (0,0). As expected, +there are 4 non-diagonal steps, and the diagonals form an LCS. + +There is a symmetric backward algorithm, which gives (backwards labels are prefixed with a colon): +A:"aabbaa", B:"aacaba" + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + ⊙ -------- ⊙ -------- ⊙ -------- ⊙ -------- ⊙ --------(:0/5,5)-------- ⊙ + b | | | ____/‾‾‾ | ____/‾‾‾ | | | + ⊙ -------- ⊙ -------- ⊙ --------(:1/3,4)-------- ⊙ -------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,3)--------(:2/1,3)-------- ⊙ --------(:2/3,3)--------(:1/4,3)-------- ⊙ -------- ⊙ + c | | | | | | | + ⊙ -------- ⊙ -------- ⊙ --------(:3/3,2)--------(:2/4,2)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:3/0,1)-------- ⊙ -------- ⊙ -------- ⊙ --------(:3/4,1)-------- ⊙ -------- ⊙ + a | ____/‾‾‾ | ____/‾‾‾ | | | ____/‾‾‾ | ____/‾‾‾ | + (:4/0,0)-------- ⊙ -------- ⊙ -------- ⊙ --------(:4/4,0)-------- ⊙ -------- ⊙ + a a b b a a + +Neither of these is ideal for use in an editor, where it is undesirable to send very long diffs to the +front end. It's tricky to decide exactly what 'very long diffs' means, as "replace A by B" is very short. +We want to control how big D can be, by stopping when it gets too large. The forward algorithm then +privileges common prefixes, and the backward algorithm privileges common suffixes. Either is an undesirable +asymmetry. + +Fortunately there is a two-sided algorithm, implied by results in Myers' paper. Here's what the labels in +the edit graph look like. 
+A:"aabbaa", B:"aacaba"
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- (2/3,5) --------- ⊙ --------- (:0/5,5)--------- ⊙
+ b | | | ____/‾‾‾‾ | ____/‾‾‾‾ | | |
+ ⊙ --------- ⊙ --------- ⊙ --------- (:1/3,4)--------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- (:2/1,3)--------- (1/2,3) ---------(2:2/3,3)--------- (:1/4,3)--------- ⊙ --------- ⊙
+ c | | | | | | |
+ ⊙ --------- ⊙ --------- (0/2,2) --------- (1/3,2) ---------(2:2/4,2)--------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+ a | ____/‾‾‾‾ | ____/‾‾‾‾ | | | ____/‾‾‾‾ | ____/‾‾‾‾ |
+ ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙ --------- ⊙
+   a           a           b           b           a           a
+
+The algorithm stopped when it saw the backwards 2-path ending at (1,3) and the forwards 2-path ending at (3,5). The criterion
+is a backwards path ending at (u,v) and a forward path ending at (x,y), where u <= x and the two points are on the same
+diagonal. (Here the edit graph has a diagonal, but the criterion is x-y=u-v.) Myers proves there is a forward
+2-path from (0,0) to (1,3), and that together with the backwards 2-path ending at (1,3) gives the expected 4-path.
+Unfortunately the forward path has to be constructed by another run of the forward algorithm; it can't be found from the
+computed labels. That is the worst case. Had the code noticed (x,y)=(u,v)=(3,3) the whole path could be reconstructed
+from the edit graph. The implementation looks for a number of special cases to try to avoid computing an extra forward path.
+
+If the two-sided algorithm has to stop early (because D has become too large) it will have found a forward LCS and a
+backwards LCS. Ideally these go with disjoint prefixes and suffixes of A and B, but disjointness may fail and the two
+computed LCS may conflict. (An easy example is where A is a suffix of B, and shares a short prefix. The backwards LCS
+is all of A, and the forward LCS is a prefix of A.) The algorithm combines the two
+to form a best-effort LCS. In the worst case the forward partial LCS may have to
+be recomputed.
+*/
+
+/* Eugene Myers' paper is titled
+"An O(ND) Difference Algorithm and Its Variations"
+and can be found at
+http://www.xmailserver.org/diff2.pdf
+
+(There is a generic implementation of the algorithm in the repository with git hash
+b9ad7e4ade3a686d608e44475390ad428e60e7fc)
+*/
diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/git.sh b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
new file mode 100644
index 00000000000..b25ba4aac74
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/diff/lcs/git.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# Copyright 2022 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+#
+# Creates a zip file containing all numbered versions
+# of the commit history of a large source file, for use
+# as input data for the tests of the diff algorithm.
+#
+# Run script from root of the x/tools repo.
+
+set -eu
+
+# WARNING: This script will install the latest version of $file
+# The largest real source file in the x/tools repo.
+# file=internal/golang/completion/completion.go +# file=internal/golang/diagnostics.go +file=internal/protocol/tsprotocol.go + +tmp=$(mktemp -d) +git log $file | + awk '/^commit / {print $2}' | + nl -ba -nrz | + while read n hash; do + git checkout --quiet $hash $file + cp -f $file $tmp/$n + done +(cd $tmp && zip -q - *) > testdata.zip +rm -fr $tmp +git restore --staged $file +git restore $file +echo "Created testdata.zip" diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/labels.go b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go new file mode 100644 index 00000000000..504913d1da3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/labels.go @@ -0,0 +1,55 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +import ( + "fmt" +) + +// For each D, vec[D] has length D+1, +// and the label for (D, k) is stored in vec[D][(D+k)/2]. +type label struct { + vec [][]int +} + +// Temporary checking DO NOT COMMIT true TO PRODUCTION CODE +const debug = false + +// debugging. check that the (d,k) pair is valid +// (that is, -d<=k<=d and d+k even) +func checkDK(D, k int) { + if k >= -D && k <= D && (D+k)%2 == 0 { + return + } + panic(fmt.Sprintf("out of range, d=%d,k=%d", D, k)) +} + +func (t *label) set(D, k, x int) { + if debug { + checkDK(D, k) + } + for len(t.vec) <= D { + t.vec = append(t.vec, nil) + } + if t.vec[D] == nil { + t.vec[D] = make([]int, D+1) + } + t.vec[D][(D+k)/2] = x // known that D+k is even +} + +func (t *label) get(d, k int) int { + if debug { + checkDK(d, k) + } + return int(t.vec[d][(d+k)/2]) +} + +func newtriang(limit int) label { + if limit < 100 { + // Preallocate if limit is not large. + return label{vec: make([][]int, limit)} + } + return label{} +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/old.go b/vendor/golang.org/x/tools/internal/diff/lcs/old.go new file mode 100644 index 00000000000..4353da15ba9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/old.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// TODO(adonovan): remove unclear references to "old" in this package. + +import ( + "fmt" +) + +// A Diff is a replacement of a portion of A by a portion of B. +type Diff struct { + Start, End int // offsets of portion to delete in A + ReplStart, ReplEnd int // offset of replacement text in B +} + +// DiffStrings returns the differences between two strings. +// It does not respect rune boundaries. +func DiffStrings(a, b string) []Diff { return diff(stringSeqs{a, b}) } + +// DiffBytes returns the differences between two byte sequences. +// It does not respect rune boundaries. +func DiffBytes(a, b []byte) []Diff { return diff(bytesSeqs{a, b}) } + +// DiffRunes returns the differences between two rune sequences. +func DiffRunes(a, b []rune) []Diff { return diff(runesSeqs{a, b}) } + +func diff(seqs sequences) []Diff { + // A limit on how deeply the LCS algorithm should search. The value is just a guess. + const maxDiffs = 100 + diff, _ := compute(seqs, twosided, maxDiffs/2) + return diff +} + +// compute computes the list of differences between two sequences, +// along with the LCS. It is exercised directly by tests. +// The algorithm is one of {forward, backward, twosided}. 
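Before compute itself, a hedged sketch of how callers are meant to read the Diff offsets defined above: delete a[Start:End] and insert b[ReplStart:ReplEnd] in its place. The apply helper and the hand-built diff below are illustrative only, not part of the vendored API.

package main

import "fmt"

// Diff mirrors the vendored struct: replace a[Start:End] by b[ReplStart:ReplEnd].
type Diff struct{ Start, End, ReplStart, ReplEnd int }

// apply rebuilds b from a and a sorted, non-overlapping list of diffs.
func apply(a, b string, diffs []Diff) string {
	var out []byte
	last := 0
	for _, d := range diffs {
		out = append(out, a[last:d.Start]...)          // unchanged text before the edit
		out = append(out, b[d.ReplStart:d.ReplEnd]...) // replacement text
		last = d.End
	}
	return string(append(out, a[last:]...)) // unchanged suffix
}

func main() {
	a, b := "abcdef", "abXYef"
	diffs := []Diff{{Start: 2, End: 4, ReplStart: 2, ReplEnd: 4}} // hand-built, not computed
	fmt.Println(apply(a, b, diffs))                               // abXYef
}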
+func compute(seqs sequences, algo func(*editGraph) lcs, limit int) ([]Diff, lcs) { + if limit <= 0 { + limit = 1 << 25 // effectively infinity + } + alen, blen := seqs.lengths() + g := &editGraph{ + seqs: seqs, + vf: newtriang(limit), + vb: newtriang(limit), + limit: limit, + ux: alen, + uy: blen, + delta: alen - blen, + } + lcs := algo(g) + diffs := lcs.toDiffs(alen, blen) + return diffs, lcs +} + +// editGraph carries the information for computing the lcs of two sequences. +type editGraph struct { + seqs sequences + vf, vb label // forward and backward labels + + limit int // maximal value of D + // the bounding rectangle of the current edit graph + lx, ly, ux, uy int + delta int // common subexpression: (ux-lx)-(uy-ly) +} + +// toDiffs converts an LCS to a list of edits. +func (lcs lcs) toDiffs(alen, blen int) []Diff { + var diffs []Diff + var pa, pb int // offsets in a, b + for _, l := range lcs { + if pa < l.X || pb < l.Y { + diffs = append(diffs, Diff{pa, l.X, pb, l.Y}) + } + pa = l.X + l.Len + pb = l.Y + l.Len + } + if pa < alen || pb < blen { + diffs = append(diffs, Diff{pa, alen, pb, blen}) + } + return diffs +} + +// --- FORWARD --- + +// fdone decides if the forward path has reached the upper right +// corner of the rectangle. If so, it also returns the computed lcs. +func (e *editGraph) fdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vf.get(D, k) + y := x - k + if x == e.ux && y == e.uy { + return true, e.forwardlcs(D, k) + } + return false, nil +} + +// run the forward algorithm, until success or up to the limit on D. +func forward(e *editGraph) lcs { + e.setForward(0, 0, e.lx) + if ok, ans := e.fdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + if ok, ans := e.fdone(D+1, -(D + 1)); ok { + return ans + } + e.setForward(D+1, D+1, e.getForward(D, D)+1) + if ok, ans := e.fdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + if ok, ans := e.fdone(D+1, k); ok { + return ans + } + } + } + // D is too large + // find the D path with maximal x+y inside the rectangle and + // use that to compute the found part of the lcs + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + return e.forwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking from the farthest point reached +func (e *editGraph) forwardlcs(D, k int) lcs { + var ans lcs + for x := e.getForward(D, k); x != 0 || x-k != 0; { + if ok(D-1, k-1) && x-1 == e.getForward(D-1, k-1) { + // if (x-1,y) is labelled D-1, x--,D--,k--,continue + D, k, x = D-1, k-1, x-1 + continue + } else if ok(D-1, k+1) && x == e.getForward(D-1, k+1) { + // if (x,y-1) is labelled D-1, x, D--,k++, continue + D, k = D-1, k+1 + continue + } + // if (x-1,y-1)--(x,y) is a diagonal, prepend,x--,y--, continue + y := x - k + ans = ans.prepend(x+e.lx-1, y+e.ly-1) + x-- + } + return ans +} + +// start at (x,y), go up the diagonal as far as possible, +// and label the result with d +func (e *editGraph) lookForward(k, relx int) int { + rely := relx - k + x, y := relx+e.lx, rely+e.ly + if x < e.ux && y < e.uy { + x += 
e.seqs.commonPrefixLen(x, e.ux, y, e.uy) + } + return x +} + +func (e *editGraph) setForward(d, k, relx int) { + x := e.lookForward(k, relx) + e.vf.set(d, k, x-e.lx) +} + +func (e *editGraph) getForward(d, k int) int { + x := e.vf.get(d, k) + return x +} + +// --- BACKWARD --- + +// bdone decides if the backward path has reached the lower left corner +func (e *editGraph) bdone(D, k int) (bool, lcs) { + // x, y, k are relative to the rectangle + x := e.vb.get(D, k) + y := x - (k + e.delta) + if x == 0 && y == 0 { + return true, e.backwardlcs(D, k) + } + return false, nil +} + +// run the backward algorithm, until success or up to the limit on D. +func backward(e *editGraph) lcs { + e.setBackward(0, 0, e.ux) + if ok, ans := e.bdone(0, 0); ok { + return ans + } + // from D to D+1 + for D := 0; D < e.limit; D++ { + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + if ok, ans := e.bdone(D+1, -(D + 1)); ok { + return ans + } + e.setBackward(D+1, D+1, e.getBackward(D, D)) + if ok, ans := e.bdone(D+1, D+1); ok { + return ans + } + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + if ok, ans := e.bdone(D+1, k); ok { + return ans + } + } + } + + // D is too large + // find the D path with minimal x+y inside the rectangle and + // use that to compute the part of the lcs found + kmax := -e.limit - 1 + diagmin := 1 << 25 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no paths when limit=%d?", e.limit)) + } + return e.backwardlcs(e.limit, kmax) +} + +// recover the lcs by backtracking +func (e *editGraph) backwardlcs(D, k int) lcs { + var ans lcs + for x := e.getBackward(D, k); x != e.ux || x-(k+e.delta) != e.uy; { + if ok(D-1, k-1) && x == e.getBackward(D-1, k-1) { + // D--, k--, x unchanged + D, k = D-1, k-1 + continue + } else if ok(D-1, k+1) && x+1 == e.getBackward(D-1, k+1) { + // D--, k++, x++ + D, k, x = D-1, k+1, x+1 + continue + } + y := x - (k + e.delta) + ans = ans.append(x+e.lx, y+e.ly) + x++ + } + return ans +} + +// start at (x,y), go down the diagonal as far as possible, +func (e *editGraph) lookBackward(k, relx int) int { + rely := relx - (k + e.delta) // forward k = k + e.delta + x, y := relx+e.lx, rely+e.ly + if x > 0 && y > 0 { + x -= e.seqs.commonSuffixLen(0, x, 0, y) + } + return x +} + +// convert to rectangle, and label the result with d +func (e *editGraph) setBackward(d, k, relx int) { + x := e.lookBackward(k, relx) + e.vb.set(d, k, x-e.lx) +} + +func (e *editGraph) getBackward(d, k int) int { + x := e.vb.get(d, k) + return x +} + +// -- TWOSIDED --- + +func twosided(e *editGraph) lcs { + // The termination condition could be improved, as either the forward + // or backward pass could succeed before Myers' Lemma applies. + // Aside from questions of efficiency (is the extra testing cost-effective) + // this is more likely to matter when e.limit is reached. 
+ e.setForward(0, 0, e.lx) + e.setBackward(0, 0, e.ux) + + // from D to D+1 + for D := 0; D < e.limit; D++ { + // just finished a backwards pass, so check + if got, ok := e.twoDone(D, D); ok { + return e.twolcs(D, D, got) + } + // do a forwards pass (D to D+1) + e.setForward(D+1, -(D + 1), e.getForward(D, -D)) + e.setForward(D+1, D+1, e.getForward(D, D)+1) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get backwards + lookv := e.lookForward(k, e.getForward(D, k-1)+1) + lookh := e.lookForward(k, e.getForward(D, k+1)) + if lookv > lookh { + e.setForward(D+1, k, lookv) + } else { + e.setForward(D+1, k, lookh) + } + } + // just did a forward pass, so check + if got, ok := e.twoDone(D+1, D); ok { + return e.twolcs(D+1, D, got) + } + // do a backward pass, D to D+1 + e.setBackward(D+1, -(D + 1), e.getBackward(D, -D)-1) + e.setBackward(D+1, D+1, e.getBackward(D, D)) + for k := -D + 1; k <= D-1; k += 2 { + // these are tricky and easy to get wrong + lookv := e.lookBackward(k, e.getBackward(D, k-1)) + lookh := e.lookBackward(k, e.getBackward(D, k+1)-1) + if lookv < lookh { + e.setBackward(D+1, k, lookv) + } else { + e.setBackward(D+1, k, lookh) + } + } + } + + // D too large. combine a forward and backward partial lcs + // first, a forward one + kmax := -e.limit - 1 + diagmax := -1 + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getForward(e.limit, k) + y := x - k + if x+y > diagmax && x <= e.ux && y <= e.uy { + diagmax, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no forward paths when limit=%d?", e.limit)) + } + lcs := e.forwardlcs(e.limit, kmax) + // now a backward one + // find the D path with minimal x+y inside the rectangle and + // use that to compute the lcs + diagmin := 1 << 25 // infinity + for k := -e.limit; k <= e.limit; k += 2 { + x := e.getBackward(e.limit, k) + y := x - (k + e.delta) + if x+y < diagmin && x >= 0 && y >= 0 { + diagmin, kmax = x+y, k + } + } + if kmax < -e.limit { + panic(fmt.Sprintf("no backward paths when limit=%d?", e.limit)) + } + lcs = append(lcs, e.backwardlcs(e.limit, kmax)...) + // These may overlap (e.forwardlcs and e.backwardlcs return sorted lcs) + ans := lcs.fix() + return ans +} + +// Does Myers' Lemma apply? +func (e *editGraph) twoDone(df, db int) (int, bool) { + if (df+db+e.delta)%2 != 0 { + return 0, false // diagonals cannot overlap + } + kmin := -db + e.delta + if -df > kmin { + kmin = -df + } + kmax := db + e.delta + if df < kmax { + kmax = df + } + for k := kmin; k <= kmax; k += 2 { + x := e.vf.get(df, k) + u := e.vb.get(db, k-e.delta) + if u <= x { + // is it worth looking at all the other k? + for l := k; l <= kmax; l += 2 { + x := e.vf.get(df, l) + y := x - l + u := e.vb.get(db, l-e.delta) + v := u - l + if x == u || u == 0 || v == 0 || y == e.uy || x == e.ux { + return l, true + } + } + return k, true + } + } + return 0, false +} + +func (e *editGraph) twolcs(df, db, kf int) lcs { + // db==df || db+1==df + x := e.vf.get(df, kf) + y := x - kf + kb := kf - e.delta + u := e.vb.get(db, kb) + v := u - kf + + // Myers proved there is a df-path from (0,0) to (u,v) + // and a db-path from (x,y) to (N,M). + // In the first case the overall path is the forward path + // to (u,v) followed by the backward path to (N,M). + // In the second case the path is the backward path to (x,y) + // followed by the forward path to (x,y) from (0,0). + + // Look for some special cases to avoid computing either of these paths. 
+ if x == u { + // "babaab" "cccaba" + // already patched together + lcs := e.forwardlcs(df, kf) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // is (u-1,v) or (u,v-1) labelled df-1? + // if so, that forward df-1-path plus a horizontal or vertical edge + // is the df-path to (u,v), then plus the db-path to (N,M) + if u > 0 && ok(df-1, u-1-v) && e.vf.get(df-1, u-1-v) == u-1 { + // "aabbab" "cbcabc" + lcs := e.forwardlcs(df-1, u-1-v) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + if v > 0 && ok(df-1, (u-(v-1))) && e.vf.get(df-1, u-(v-1)) == u { + // "abaabb" "bcacab" + lcs := e.forwardlcs(df-1, u-(v-1)) + lcs = append(lcs, e.backwardlcs(db, kb)...) + return lcs.sort() + } + + // The path can't possibly contribute to the lcs because it + // is all horizontal or vertical edges + if u == 0 || v == 0 || x == e.ux || y == e.uy { + // "abaabb" "abaaaa" + if u == 0 || v == 0 { + return e.backwardlcs(db, kb) + } + return e.forwardlcs(df, kf) + } + + // is (x+1,y) or (x,y+1) labelled db-1? + if x+1 <= e.ux && ok(db-1, x+1-y-e.delta) && e.vb.get(db-1, x+1-y-e.delta) == x+1 { + // "bababb" "baaabb" + lcs := e.backwardlcs(db-1, kb+1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + if y+1 <= e.uy && ok(db-1, x-(y+1)-e.delta) && e.vb.get(db-1, x-(y+1)-e.delta) == x { + // "abbbaa" "cabacc" + lcs := e.backwardlcs(db-1, kb-1) + lcs = append(lcs, e.forwardlcs(df, kf)...) + return lcs.sort() + } + + // need to compute another path + // "aabbaa" "aacaba" + lcs := e.backwardlcs(db, kb) + oldx, oldy := e.ux, e.uy + e.ux = u + e.uy = v + lcs = append(lcs, forward(e)...) + e.ux, e.uy = oldx, oldy + return lcs.sort() +} diff --git a/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go new file mode 100644 index 00000000000..2d72d263043 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/lcs/sequence.go @@ -0,0 +1,113 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package lcs + +// This file defines the abstract sequence over which the LCS algorithm operates. + +// sequences abstracts a pair of sequences, A and B. +type sequences interface { + lengths() (int, int) // len(A), len(B) + commonPrefixLen(ai, aj, bi, bj int) int // len(commonPrefix(A[ai:aj], B[bi:bj])) + commonSuffixLen(ai, aj, bi, bj int) int // len(commonSuffix(A[ai:aj], B[bi:bj])) +} + +type stringSeqs struct{ a, b string } + +func (s stringSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s stringSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenString(s.a[ai:aj], s.b[bi:bj]) +} +func (s stringSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenString(s.a[ai:aj], s.b[bi:bj]) +} + +// The explicit capacity in s[i:j:j] leads to more efficient code. 
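That comment is worth unpacking with a runnable aside (standalone, not from this package): the full slice expression s[i:j:j] caps the capacity at the slice's own length, so a later append must allocate a copy instead of writing into the shared backing array.

package main

import "fmt"

func main() {
	base := []byte("hello world")

	// Plain slice: len 5, cap 11. An append can write into base's backing array.
	plain := base[0:5]
	_ = append(plain, '!')
	fmt.Println(string(base)) // "hello!world"

	// Full slice expression: len 5, cap 5. An append must allocate a copy.
	base = []byte("hello world")
	capped := base[0:5:5]
	_ = append(capped, '!')
	fmt.Println(string(base)) // "hello world"
}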
+ +type bytesSeqs struct{ a, b []byte } + +func (s bytesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s bytesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s bytesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenBytes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +type runesSeqs struct{ a, b []rune } + +func (s runesSeqs) lengths() (int, int) { return len(s.a), len(s.b) } +func (s runesSeqs) commonPrefixLen(ai, aj, bi, bj int) int { + return commonPrefixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} +func (s runesSeqs) commonSuffixLen(ai, aj, bi, bj int) int { + return commonSuffixLenRunes(s.a[ai:aj:aj], s.b[bi:bj:bj]) +} + +// TODO(adonovan): optimize these functions using ideas from: +// - https://go.dev/cl/408116 common.go +// - https://go.dev/cl/421435 xor_generic.go + +// TODO(adonovan): factor using generics when available, +// but measure performance impact. + +// commonPrefixLen* returns the length of the common prefix of a[ai:aj] and b[bi:bj]. +func commonPrefixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} +func commonPrefixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[i] == b[i] { + i++ + } + return i +} + +// commonSuffixLen* returns the length of the common suffix of a[ai:aj] and b[bi:bj]. +func commonSuffixLenBytes(a, b []byte) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenRunes(a, b []rune) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} +func commonSuffixLenString(a, b string) int { + n := min(len(a), len(b)) + i := 0 + for i < n && a[len(a)-1-i] == b[len(b)-1-i] { + i++ + } + return i +} + +func min(x, y int) int { + if x < y { + return x + } else { + return y + } +} diff --git a/vendor/golang.org/x/tools/internal/diff/ndiff.go b/vendor/golang.org/x/tools/internal/diff/ndiff.go new file mode 100644 index 00000000000..fbef4d730c5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/ndiff.go @@ -0,0 +1,99 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "bytes" + "unicode/utf8" + + "golang.org/x/tools/internal/diff/lcs" +) + +// Strings computes the differences between two strings. +// The resulting edits respect rune boundaries. +func Strings(before, after string) []Edit { + if before == after { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + // TODO(adonovan): opt: specialize diffASCII for strings. + return diffASCII([]byte(before), []byte(after)) + } + return diffRunes([]rune(before), []rune(after)) +} + +// Bytes computes the differences between two byte slices. +// The resulting edits respect rune boundaries. +func Bytes(before, after []byte) []Edit { + if bytes.Equal(before, after) { + return nil // common case + } + + if isASCII(before) && isASCII(after) { + return diffASCII(before, after) + } + return diffRunes(runes(before), runes(after)) +} + +func diffASCII(before, after []byte) []Edit { + diffs := lcs.DiffBytes(before, after) + + // Convert from LCS diffs. 
+ res := make([]Edit, len(diffs)) + for i, d := range diffs { + res[i] = Edit{d.Start, d.End, string(after[d.ReplStart:d.ReplEnd])} + } + return res +} + +func diffRunes(before, after []rune) []Edit { + diffs := lcs.DiffRunes(before, after) + + // The diffs returned by the lcs package use indexes + // into whatever slice was passed in. + // Convert rune offsets to byte offsets. + res := make([]Edit, len(diffs)) + lastEnd := 0 + utf8Len := 0 + for i, d := range diffs { + utf8Len += runesLen(before[lastEnd:d.Start]) // text between edits + start := utf8Len + utf8Len += runesLen(before[d.Start:d.End]) // text deleted by this edit + res[i] = Edit{start, utf8Len, string(after[d.ReplStart:d.ReplEnd])} + lastEnd = d.End + } + return res +} + +// runes is like []rune(string(bytes)) without the duplicate allocation. +func runes(bytes []byte) []rune { + n := utf8.RuneCount(bytes) + runes := make([]rune, n) + for i := 0; i < n; i++ { + r, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + runes[i] = r + } + return runes +} + +// runesLen returns the length in bytes of the UTF-8 encoding of runes. +func runesLen(runes []rune) (len int) { + for _, r := range runes { + len += utf8.RuneLen(r) + } + return len +} + +// isASCII reports whether s contains only ASCII. +func isASCII[S string | []byte](s S) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/diff/unified.go b/vendor/golang.org/x/tools/internal/diff/unified.go new file mode 100644 index 00000000000..cfbda61020a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/diff/unified.go @@ -0,0 +1,251 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package diff + +import ( + "fmt" + "log" + "strings" +) + +// DefaultContextLines is the number of unchanged lines of surrounding +// context displayed by Unified. Use ToUnified to specify a different value. +const DefaultContextLines = 3 + +// Unified returns a unified diff of the old and new strings. +// The old and new labels are the names of the old and new files. +// If the strings are equal, it returns the empty string. +func Unified(oldLabel, newLabel, old, new string) string { + edits := Strings(old, new) + unified, err := ToUnified(oldLabel, newLabel, old, edits, DefaultContextLines) + if err != nil { + // Can't happen: edits are consistent. + log.Fatalf("internal error in diff.Unified: %v", err) + } + return unified +} + +// ToUnified applies the edits to content and returns a unified diff, +// with contextLines lines of (unchanged) context around each diff hunk. +// The old and new labels are the names of the content and result files. +// It returns an error if the edits are inconsistent; see ApplyEdits. +func ToUnified(oldLabel, newLabel, content string, edits []Edit, contextLines int) (string, error) { + u, err := toUnified(oldLabel, newLabel, content, edits, contextLines) + if err != nil { + return "", err + } + return u.String(), nil +} + +// unified represents a set of edits as a unified diff. +type unified struct { + // from is the name of the original file. + from string + // to is the name of the modified file. + to string + // hunks is the set of edit hunks needed to transform the file content. + hunks []*hunk +} + +// Hunk represents a contiguous set of line edits to apply. +type hunk struct { + // The line in the original source where the hunk starts. 
+ fromLine int + // The line in the original source where the hunk finishes. + toLine int + // The set of line based edits to apply. + lines []line +} + +// Line represents a single line operation to apply as part of a Hunk. +type line struct { + // kind is the type of line this represents, deletion, insertion or copy. + kind opKind + // content is the content of this line. + // For deletion it is the line being removed, for all others it is the line + // to put in the output. + content string +} + +// opKind is used to denote the type of operation a line represents. +type opKind int + +const ( + // opDelete is the operation kind for a line that is present in the input + // but not in the output. + opDelete opKind = iota + // opInsert is the operation kind for a line that is new in the output. + opInsert + // opEqual is the operation kind for a line that is the same in the input and + // output, often used to provide context around edited lines. + opEqual +) + +// String returns a human readable representation of an OpKind. It is not +// intended for machine processing. +func (k opKind) String() string { + switch k { + case opDelete: + return "delete" + case opInsert: + return "insert" + case opEqual: + return "equal" + default: + panic("unknown operation kind") + } +} + +// toUnified takes a file contents and a sequence of edits, and calculates +// a unified diff that represents those edits. +func toUnified(fromName, toName string, content string, edits []Edit, contextLines int) (unified, error) { + gap := contextLines * 2 + u := unified{ + from: fromName, + to: toName, + } + if len(edits) == 0 { + return u, nil + } + var err error + edits, err = lineEdits(content, edits) // expand to whole lines + if err != nil { + return u, err + } + lines := splitLines(content) + var h *hunk + last := 0 + toLine := 0 + for _, edit := range edits { + // Compute the zero-based line numbers of the edit start and end. + // TODO(adonovan): opt: compute incrementally, avoid O(n^2). 
+ start := strings.Count(content[:edit.Start], "\n") + end := strings.Count(content[:edit.End], "\n") + if edit.End == len(content) && len(content) > 0 && content[len(content)-1] != '\n' { + end++ // EOF counts as an implicit newline + } + + switch { + case h != nil && start == last: + //direct extension + case h != nil && start <= last+gap: + //within range of previous lines, add the joiners + addEqualLines(h, lines, last, start) + default: + //need to start a new hunk + if h != nil { + // add the edge to the previous hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + toLine += start - last + h = &hunk{ + fromLine: start + 1, + toLine: toLine + 1, + } + // add the edge to the new hunk + delta := addEqualLines(h, lines, start-contextLines, start) + h.fromLine -= delta + h.toLine -= delta + } + last = start + for i := start; i < end; i++ { + h.lines = append(h.lines, line{kind: opDelete, content: lines[i]}) + last++ + } + if edit.New != "" { + for _, content := range splitLines(edit.New) { + h.lines = append(h.lines, line{kind: opInsert, content: content}) + toLine++ + } + } + } + if h != nil { + // add the edge to the final hunk + addEqualLines(h, lines, last, last+contextLines) + u.hunks = append(u.hunks, h) + } + return u, nil +} + +func splitLines(text string) []string { + lines := strings.SplitAfter(text, "\n") + if lines[len(lines)-1] == "" { + lines = lines[:len(lines)-1] + } + return lines +} + +func addEqualLines(h *hunk, lines []string, start, end int) int { + delta := 0 + for i := start; i < end; i++ { + if i < 0 { + continue + } + if i >= len(lines) { + return delta + } + h.lines = append(h.lines, line{kind: opEqual, content: lines[i]}) + delta++ + } + return delta +} + +// String converts a unified diff to the standard textual form for that diff. +// The output of this function can be passed to tools like patch. +func (u unified) String() string { + if len(u.hunks) == 0 { + return "" + } + b := new(strings.Builder) + fmt.Fprintf(b, "--- %s\n", u.from) + fmt.Fprintf(b, "+++ %s\n", u.to) + for _, hunk := range u.hunks { + fromCount, toCount := 0, 0 + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fromCount++ + case opInsert: + toCount++ + default: + fromCount++ + toCount++ + } + } + fmt.Fprint(b, "@@") + if fromCount > 1 { + fmt.Fprintf(b, " -%d,%d", hunk.fromLine, fromCount) + } else if hunk.fromLine == 1 && fromCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " -0,0") + } else { + fmt.Fprintf(b, " -%d", hunk.fromLine) + } + if toCount > 1 { + fmt.Fprintf(b, " +%d,%d", hunk.toLine, toCount) + } else if hunk.toLine == 1 && toCount == 0 { + // Match odd GNU diff -u behavior adding to empty file. + fmt.Fprintf(b, " +0,0") + } else { + fmt.Fprintf(b, " +%d", hunk.toLine) + } + fmt.Fprint(b, " @@\n") + for _, l := range hunk.lines { + switch l.kind { + case opDelete: + fmt.Fprintf(b, "-%s", l.content) + case opInsert: + fmt.Fprintf(b, "+%s", l.content) + default: + fmt.Fprintf(b, " %s", l.content) + } + if !strings.HasSuffix(l.content, "\n") { + fmt.Fprintf(b, "\n\\ No newline at end of file\n") + } + } + } + return b.String() +} diff --git a/vendor/golang.org/x/tools/internal/facts/facts.go b/vendor/golang.org/x/tools/internal/facts/facts.go new file mode 100644 index 00000000000..e1c18d373c3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/facts/facts.go @@ -0,0 +1,389 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facts defines a serializable set of analysis.Fact. +// +// It provides a partial implementation of the Fact-related parts of the +// analysis.Pass interface for use in analysis drivers such as "go vet" +// and other build systems. +// +// The serial format is unspecified and may change, so the same version +// of this package must be used for reading and writing serialized facts. +// +// The handling of facts in the analysis system parallels the handling +// of type information in the compiler: during compilation of package P, +// the compiler emits an export data file that describes the type of +// every object (named thing) defined in package P, plus every object +// indirectly reachable from one of those objects. Thus the downstream +// compiler of package Q need only load one export data file per direct +// import of Q, and it will learn everything about the API of package P +// and everything it needs to know about the API of P's dependencies. +// +// Similarly, analysis of package P emits a fact set containing facts +// about all objects exported from P, plus additional facts about only +// those objects of P's dependencies that are reachable from the API of +// package P; the downstream analysis of Q need only load one fact set +// per direct import of Q. +// +// The notion of "exportedness" that matters here is that of the +// compiler. According to the language spec, a method pkg.T.f is +// unexported simply because its name starts with lowercase. But the +// compiler must nonetheless export f so that downstream compilations can +// accurately ascertain whether pkg.T implements an interface pkg.I +// defined as interface{f()}. Exported thus means "described in export +// data". +package facts + +import ( + "bytes" + "encoding/gob" + "fmt" + "go/types" + "io" + "log" + "reflect" + "sort" + "sync" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/types/objectpath" +) + +const debug = false + +// A Set is a set of analysis.Facts. +// +// Decode creates a Set of facts by reading from the imports of a given +// package, and Encode writes out the set. Between these operation, +// the Import and Export methods will query and update the set. +// +// All of Set's methods except String are safe to call concurrently. +type Set struct { + pkg *types.Package + mu sync.Mutex + m map[key]analysis.Fact +} + +type key struct { + pkg *types.Package + obj types.Object // (object facts only) + t reflect.Type +} + +// ImportObjectFact implements analysis.Pass.ImportObjectFact. +func (s *Set) ImportObjectFact(obj types.Object, ptr analysis.Fact) bool { + if obj == nil { + panic("nil object") + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportObjectFact implements analysis.Pass.ExportObjectFact. 
+func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) { + if obj.Pkg() != s.pkg { + log.Panicf("in package %s: ExportObjectFact(%s, %T): can't set fact on object belonging another package", + s.pkg, obj, fact) + } + key := key{pkg: obj.Pkg(), obj: obj, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact { + var facts []analysis.ObjectFact + s.mu.Lock() + for k, v := range s.m { + if k.obj != nil && filter[k.t] { + facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: v}) + } + } + s.mu.Unlock() + return facts +} + +// ImportPackageFact implements analysis.Pass.ImportPackageFact. +func (s *Set) ImportPackageFact(pkg *types.Package, ptr analysis.Fact) bool { + if pkg == nil { + panic("nil package") + } + key := key{pkg: pkg, t: reflect.TypeOf(ptr)} + s.mu.Lock() + defer s.mu.Unlock() + if v, ok := s.m[key]; ok { + reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem()) + return true + } + return false +} + +// ExportPackageFact implements analysis.Pass.ExportPackageFact. +func (s *Set) ExportPackageFact(fact analysis.Fact) { + key := key{pkg: s.pkg, t: reflect.TypeOf(fact)} + s.mu.Lock() + s.m[key] = fact // clobber any existing entry + s.mu.Unlock() +} + +func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact { + var facts []analysis.PackageFact + s.mu.Lock() + for k, v := range s.m { + if k.obj == nil && filter[k.t] { + facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: v}) + } + } + s.mu.Unlock() + return facts +} + +// gobFact is the Gob declaration of a serialized fact. +type gobFact struct { + PkgPath string // path of package + Object objectpath.Path // optional path of object relative to package itself + Fact analysis.Fact // type and value of user-defined Fact +} + +// A Decoder decodes the facts from the direct imports of the package +// provided to NewEncoder. A single decoder may be used to decode +// multiple fact sets (e.g. each for a different set of fact types) +// for the same package. Each call to Decode returns an independent +// fact set. +type Decoder struct { + pkg *types.Package + getPackage GetPackageFunc +} + +// NewDecoder returns a fact decoder for the specified package. +// +// It uses a brute-force recursive approach to enumerate all objects +// defined by dependencies of pkg, so that it can learn the set of +// package paths that may be mentioned in the fact encoding. This does +// not scale well; use [NewDecoderFunc] where possible. +func NewDecoder(pkg *types.Package) *Decoder { + // Compute the import map for this package. + // See the package doc comment. + m := importMap(pkg.Imports()) + getPackageFunc := func(path string) *types.Package { return m[path] } + return NewDecoderFunc(pkg, getPackageFunc) +} + +// NewDecoderFunc returns a fact decoder for the specified package. +// +// It calls the getPackage function for the package path string of +// each dependency (perhaps indirect) that it encounters in the +// encoding. If the function returns nil, the fact is discarded. +// +// This function is preferred over [NewDecoder] when the client is +// capable of efficient look-up of packages by package path. +func NewDecoderFunc(pkg *types.Package, getPackage GetPackageFunc) *Decoder { + return &Decoder{ + pkg: pkg, + getPackage: getPackage, + } +} + +// A GetPackageFunc function returns the package denoted by a package path. 
+type GetPackageFunc = func(pkgPath string) *types.Package + +// Decode decodes all the facts relevant to the analysis of package +// pkgPath. The read function reads serialized fact data from an external +// source for one of pkg's direct imports, identified by package path. +// The empty file is a valid encoding of an empty fact set. +// +// It is the caller's responsibility to call gob.Register on all +// necessary fact types. +// +// Concurrent calls to Decode are safe, so long as the +// [GetPackageFunc] (if any) is also concurrency-safe. +func (d *Decoder) Decode(read func(pkgPath string) ([]byte, error)) (*Set, error) { + // Read facts from imported packages. + // Facts may describe indirectly imported packages, or their objects. + m := make(map[key]analysis.Fact) // one big bucket + for _, imp := range d.pkg.Imports() { + logf := func(format string, args ...interface{}) { + if debug { + prefix := fmt.Sprintf("in %s, importing %s: ", + d.pkg.Path(), imp.Path()) + log.Print(prefix, fmt.Sprintf(format, args...)) + } + } + + // Read the gob-encoded facts. + data, err := read(imp.Path()) + if err != nil { + return nil, fmt.Errorf("in %s, can't import facts for package %q: %v", + d.pkg.Path(), imp.Path(), err) + } + if len(data) == 0 { + continue // no facts + } + var gobFacts []gobFact + if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&gobFacts); err != nil { + return nil, fmt.Errorf("decoding facts for %q: %v", imp.Path(), err) + } + logf("decoded %d facts: %v", len(gobFacts), gobFacts) + + // Parse each one into a key and a Fact. + for _, f := range gobFacts { + factPkg := d.getPackage(f.PkgPath) // possibly an indirect dependency + if factPkg == nil { + // Fact relates to a dependency that was + // unused in this translation unit. Skip. + logf("no package %q; discarding %v", f.PkgPath, f.Fact) + continue + } + key := key{pkg: factPkg, t: reflect.TypeOf(f.Fact)} + if f.Object != "" { + // object fact + obj, err := objectpath.Object(factPkg, f.Object) + if err != nil { + // (most likely due to unexported object) + // TODO(adonovan): audit for other possibilities. + logf("no object for path: %v; discarding %s", err, f.Fact) + continue + } + key.obj = obj + logf("read %T fact %s for %v", f.Fact, f.Fact, key.obj) + } else { + // package fact + logf("read %T fact %s for %v", f.Fact, f.Fact, factPkg) + } + m[key] = f.Fact + } + } + + return &Set{pkg: d.pkg, m: m}, nil +} + +// Encode encodes a set of facts to a memory buffer. +// +// It may fail if one of the Facts could not be gob-encoded, but this is +// a sign of a bug in an Analyzer. +func (s *Set) Encode() []byte { + encoder := new(objectpath.Encoder) + + // TODO(adonovan): opt: use a more efficient encoding + // that avoids repeating PkgPath for each fact. + + // Gather all facts, including those from imported packages. + var gobFacts []gobFact + + s.mu.Lock() + for k, fact := range s.m { + if debug { + log.Printf("%v => %s\n", k, fact) + } + + // Don't export facts that we imported from another + // package, unless they represent fields or methods, + // or package-level types. + // (Facts about packages, and other package-level + // objects, are only obtained from direct imports so + // they needn't be reexported.) + // + // This is analogous to the pruning done by "deep" + // export data for types, but not as precise because + // we aren't careful about which structs or methods + // we rexport: it should be only those referenced + // from the API of s.pkg. + // TODO(adonovan): opt: be more precise. e.g. 
+ // intersect with the set of objects computed by + // importMap(s.pkg.Imports()). + // TODO(adonovan): opt: implement "shallow" facts. + if k.pkg != s.pkg { + if k.obj == nil { + continue // imported package fact + } + if _, isType := k.obj.(*types.TypeName); !isType && + k.obj.Parent() == k.obj.Pkg().Scope() { + continue // imported fact about package-level non-type object + } + } + + var object objectpath.Path + if k.obj != nil { + path, err := encoder.For(k.obj) + if err != nil { + if debug { + log.Printf("discarding fact %s about %s\n", fact, k.obj) + } + continue // object not accessible from package API; discard fact + } + object = path + } + gobFacts = append(gobFacts, gobFact{ + PkgPath: k.pkg.Path(), + Object: object, + Fact: fact, + }) + } + s.mu.Unlock() + + // Sort facts by (package, object, type) for determinism. + sort.Slice(gobFacts, func(i, j int) bool { + x, y := gobFacts[i], gobFacts[j] + if x.PkgPath != y.PkgPath { + return x.PkgPath < y.PkgPath + } + if x.Object != y.Object { + return x.Object < y.Object + } + tx := reflect.TypeOf(x.Fact) + ty := reflect.TypeOf(y.Fact) + if tx != ty { + return tx.String() < ty.String() + } + return false // equal + }) + + var buf bytes.Buffer + if len(gobFacts) > 0 { + if err := gob.NewEncoder(&buf).Encode(gobFacts); err != nil { + // Fact encoding should never fail. Identify the culprit. + for _, gf := range gobFacts { + if err := gob.NewEncoder(io.Discard).Encode(gf); err != nil { + fact := gf.Fact + pkgpath := reflect.TypeOf(fact).Elem().PkgPath() + log.Panicf("internal error: gob encoding of analysis fact %s failed: %v; please report a bug against fact %T in package %q", + fact, err, fact, pkgpath) + } + } + } + } + + if debug { + log.Printf("package %q: encode %d facts, %d bytes\n", + s.pkg.Path(), len(gobFacts), buf.Len()) + } + + return buf.Bytes() +} + +// String is provided only for debugging, and must not be called +// concurrent with any Import/Export method. +func (s *Set) String() string { + var buf bytes.Buffer + buf.WriteString("{") + for k, f := range s.m { + if buf.Len() > 1 { + buf.WriteString(", ") + } + if k.obj != nil { + buf.WriteString(k.obj.String()) + } else { + buf.WriteString(k.pkg.Path()) + } + fmt.Fprintf(&buf, ": %v", f) + } + buf.WriteString("}") + return buf.String() +} diff --git a/vendor/golang.org/x/tools/internal/facts/imports.go b/vendor/golang.org/x/tools/internal/facts/imports.go new file mode 100644 index 00000000000..9f706cd954f --- /dev/null +++ b/vendor/golang.org/x/tools/internal/facts/imports.go @@ -0,0 +1,136 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package facts + +import ( + "go/types" + + "golang.org/x/tools/internal/aliases" +) + +// importMap computes the import map for a package by traversing the +// entire exported API each of its imports. +// +// This is a workaround for the fact that we cannot access the map used +// internally by the types.Importer returned by go/importer. The entries +// in this map are the packages and objects that may be relevant to the +// current analysis unit. +// +// Packages in the map that are only indirectly imported may be +// incomplete (!pkg.Complete()). +// +// This function scales very poorly with packages' transitive object +// references, which can be more than a million for each package near +// the top of a large project. (This was a significant contributor to +// #60621.) 
+// TODO(adonovan): opt: compute this information more efficiently +// by obtaining it from the internals of the gcexportdata decoder. +func importMap(imports []*types.Package) map[string]*types.Package { + objects := make(map[types.Object]bool) + typs := make(map[types.Type]bool) // Named and TypeParam + packages := make(map[string]*types.Package) + + var addObj func(obj types.Object) + var addType func(T types.Type) + + addObj = func(obj types.Object) { + if !objects[obj] { + objects[obj] = true + addType(obj.Type()) + if pkg := obj.Pkg(); pkg != nil { + packages[pkg.Path()] = pkg + } + } + } + + addType = func(T types.Type) { + switch T := T.(type) { + case *aliases.Alias: + addType(aliases.Unalias(T)) + case *types.Basic: + // nop + case *types.Named: + // Remove infinite expansions of *types.Named by always looking at the origin. + // Some named types with type parameters [that will not type check] have + // infinite expansions: + // type N[T any] struct { F *N[N[T]] } + // importMap() is called on such types when Analyzer.RunDespiteErrors is true. + T = T.Origin() + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Underlying()) + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + if tparams := T.TypeParams(); tparams != nil { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + if targs := T.TypeArgs(); targs != nil { + for i := 0; i < targs.Len(); i++ { + addType(targs.At(i)) + } + } + } + case *types.Pointer: + addType(T.Elem()) + case *types.Slice: + addType(T.Elem()) + case *types.Array: + addType(T.Elem()) + case *types.Chan: + addType(T.Elem()) + case *types.Map: + addType(T.Key()) + addType(T.Elem()) + case *types.Signature: + addType(T.Params()) + addType(T.Results()) + if tparams := T.TypeParams(); tparams != nil { + for i := 0; i < tparams.Len(); i++ { + addType(tparams.At(i)) + } + } + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + addObj(T.Field(i)) + } + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + addObj(T.At(i)) + } + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + addObj(T.Method(i)) + } + for i := 0; i < T.NumEmbeddeds(); i++ { + addType(T.EmbeddedType(i)) // walk Embedded for implicits + } + case *types.Union: + for i := 0; i < T.Len(); i++ { + addType(T.Term(i).Type()) + } + case *types.TypeParam: + if !typs[T] { + typs[T] = true + addObj(T.Obj()) + addType(T.Constraint()) + } + } + } + + for _, imp := range imports { + packages[imp.Path()] = imp + + scope := imp.Scope() + for _, name := range scope.Names() { + addObj(scope.Lookup(name)) + } + } + + return packages +} diff --git a/vendor/golang.org/x/tools/internal/goroot/importcfg.go b/vendor/golang.org/x/tools/internal/goroot/importcfg.go new file mode 100644 index 00000000000..f1cd28e2ec3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/goroot/importcfg.go @@ -0,0 +1,71 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package goroot is a copy of package internal/goroot +// in the main GO repot. It provides a utility to produce +// an importcfg and import path to package file map mapping +// standard library packages to the locations of their export +// data files. +package goroot + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "sync" +) + +// Importcfg returns an importcfg file to be passed to the +// Go compiler that contains the cached paths for the .a files for the +// standard library. 
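For orientation before the implementation: the returned text is in the compiler's importcfg format, a header comment followed by one packagefile line per package, exactly as the Fprintf calls below construct it. The concrete paths here are hypothetical examples of what go list -export might report on one machine, not real output.

# import config
packagefile fmt=/home/user/.cache/go-build/4f/4fe7abc123-d
packagefile errors=/home/user/.cache/go-build/9c/9c1adef456-d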
+func Importcfg() (string, error) {
+	var icfg bytes.Buffer
+
+	m, err := PkgfileMap()
+	if err != nil {
+		return "", err
+	}
+	fmt.Fprintf(&icfg, "# import config")
+	for importPath, export := range m {
+		fmt.Fprintf(&icfg, "\npackagefile %s=%s", importPath, export)
+	}
+	s := icfg.String()
+	return s, nil
+}
+
+var (
+	stdlibPkgfileMap map[string]string
+	stdlibPkgfileErr error
+	once             sync.Once
+)
+
+// PkgfileMap returns a map of package paths to the location on disk
+// of the .a file for the package.
+// The caller must not modify the map.
+func PkgfileMap() (map[string]string, error) {
+	once.Do(func() {
+		m := make(map[string]string)
+		output, err := exec.Command("go", "list", "-export", "-e", "-f", "{{.ImportPath}} {{.Export}}", "std", "cmd").Output()
+		if err != nil {
+			stdlibPkgfileErr = err
+		}
+		for _, line := range strings.Split(string(output), "\n") {
+			if line == "" {
+				continue
+			}
+			sp := strings.SplitN(line, " ", 2)
+			if len(sp) != 2 {
+				// Record the malformed line so the error is not silently dropped.
+				stdlibPkgfileErr = fmt.Errorf("determining pkgfile map: invalid line in go list output: %q", line)
+				return
+			}
+			importPath, export := sp[0], sp[1]
+			if export != "" {
+				m[importPath] = export
+			}
+		}
+		stdlibPkgfileMap = m
+	})
+	return stdlibPkgfileMap, stdlibPkgfileErr
+}
diff --git a/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
new file mode 100644
index 00000000000..949f2781619
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/gopls_windows.go
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package robustio
+
+import "syscall"
+
+// The robustio package is copied from cmd/go/internal/robustio, a package used
+// by the go command to retry known flaky operations on certain operating systems.
+
+//go:generate go run copyfiles.go
+
+// Since the gopls module cannot access internal/syscall/windows, copy a
+// necessary constant.
+const ERROR_SHARING_VIOLATION syscall.Errno = 32
diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio.go b/vendor/golang.org/x/tools/internal/robustio/robustio.go
new file mode 100644
index 00000000000..0a559fc9b80
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/robustio/robustio.go
@@ -0,0 +1,69 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package robustio wraps I/O functions that are prone to failure on Windows,
+// transparently retrying errors up to an arbitrary timeout.
+//
+// Errors are classified heuristically and retries are bounded, so the functions
+// in this package do not completely eliminate spurious errors. However, they do
+// significantly reduce the rate of failure in practice.
+package robustio
+
+import "time"
+
+// Rename is like os.Rename, but on Windows retries errors that may occur if the
+// file is concurrently read or overwritten.
+//
+// (See golang.org/issue/31247 and golang.org/issue/32188.)
+func Rename(oldpath, newpath string) error {
+	return rename(oldpath, newpath)
+}
+
+// ReadFile is like os.ReadFile, but on Windows retries errors that may
+// occur if the file is concurrently replaced.
+// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. +func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} + +// A FileID uniquely identifies a file in the file system. +// +// If GetFileID(name1) returns the same ID as GetFileID(name2), the two file +// names denote the same file. +// A FileID is comparable, and thus suitable for use as a map key. +type FileID struct { + device, inode uint64 +} + +// GetFileID returns the file system's identifier for the file, and its +// modification time. +// Like os.Stat, it reads through symbolic links. +func GetFileID(filename string) (FileID, time.Time, error) { return getFileID(filename) } diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go new file mode 100644 index 00000000000..99fd8ebc2ff --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_darwin.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go new file mode 100644 index 00000000000..d5c241857b4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_flaky.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows || darwin +// +build windows darwin + +package robustio + +import ( + "errors" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 2000 * time.Millisecond + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. 
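+//
+// The loop below uses exponential backoff with random jitter: each failed
+// attempt adds a random duration of up to the current sleep to nextSleep,
+// and it gives up once the elapsed time plus the next sleep would reach
+// arbitraryTimeout. Of the errors observed, it prefers the one with the
+// lowest errno value seen.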
+func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + var errno syscall.Errno + if errors.As(err, &errno) && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like os.ReadFile, but retries ephemeral errors. +func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = os.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + return err, isEphemeralError(err) && !errors.Is(err, errFileNotFound) + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_other.go b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go new file mode 100644 index 00000000000..3a20cac6cf8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_other.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !darwin +// +build !windows,!darwin + +package robustio + +import ( + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return os.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go new file mode 100644 index 00000000000..9fa4cacb5a3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_plan9.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build plan9 +// +build plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + dir := fi.Sys().(*syscall.Dir) + return FileID{ + device: uint64(dir.Type)<<32 | uint64(dir.Dev), + inode: dir.Qid.Path, + }, fi.ModTime(), nil +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go new file mode 100644 index 00000000000..cf74865d0b5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_posix.go @@ -0,0 +1,26 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package robustio + +import ( + "os" + "syscall" + "time" +) + +func getFileID(filename string) (FileID, time.Time, error) { + fi, err := os.Stat(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + stat := fi.Sys().(*syscall.Stat_t) + return FileID{ + device: uint64(stat.Dev), // (int32 on darwin, uint64 on linux) + inode: stat.Ino, + }, fi.ModTime(), nil +} diff --git a/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go new file mode 100644 index 00000000000..616c32883d6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/robustio/robustio_windows.go @@ -0,0 +1,51 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "errors" + "syscall" + "time" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} + +// Note: it may be convenient to have this helper return fs.FileInfo, but +// implementing this is actually quite involved on Windows. Since we only +// currently use mtime, keep it simple. +func getFileID(filename string) (FileID, time.Time, error) { + filename16, err := syscall.UTF16PtrFromString(filename) + if err != nil { + return FileID{}, time.Time{}, err + } + h, err := syscall.CreateFile(filename16, 0, 0, nil, syscall.OPEN_EXISTING, uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS), 0) + if err != nil { + return FileID{}, time.Time{}, err + } + defer syscall.CloseHandle(h) + var i syscall.ByHandleFileInformation + if err := syscall.GetFileInformationByHandle(h, &i); err != nil { + return FileID{}, time.Time{}, err + } + mtime := time.Unix(0, i.LastWriteTime.Nanoseconds()) + return FileID{ + device: uint64(i.VolumeSerialNumber), + inode: uint64(i.FileIndexHigh)<<32 | uint64(i.FileIndexLow), + }, mtime, nil +} diff --git a/vendor/golang.org/x/tools/internal/testenv/exec.go b/vendor/golang.org/x/tools/internal/testenv/exec.go new file mode 100644 index 00000000000..f2ab5f5eb8d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/exec.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package testenv + +import ( + "context" + "flag" + "os" + "os/exec" + "reflect" + "runtime" + "strconv" + "sync" + "testing" + "time" +) + +// HasExec reports whether the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +func HasExec() bool { + switch runtime.GOOS { + case "aix", + "android", + "darwin", + "dragonfly", + "freebsd", + "illumos", + "linux", + "netbsd", + "openbsd", + "plan9", + "solaris", + "windows": + // Known OS that isn't ios or wasm; assume that exec works. + return true + + case "ios", "js", "wasip1": + // ios has an exec syscall but on real iOS devices it might return a + // permission error. In an emulated environment (such as a Corellium host) + // it might succeed, so try it and find out. + // + // As of 2023-04-19 wasip1 and js don't have exec syscalls at all, but we + // may as well use the same path so that this branch can be tested without + // an ios environment. + fallthrough + + default: + tryExecOnce.Do(func() { + exe, err := os.Executable() + if err != nil { + return + } + if flag.Lookup("test.list") == nil { + // We found the executable, but we don't know how to run it in a way + // that should succeed without side-effects. Just forget it. + return + } + // We know that a test executable exists and can run, because we're + // running it now. Use it to check for overall exec support, but be sure + // to remove any environment variables that might trigger non-default + // behavior in a custom TestMain. + cmd := exec.Command(exe, "-test.list=^$") + cmd.Env = []string{} + if err := cmd.Run(); err == nil { + tryExecOk = true + } + }) + return tryExecOk + } +} + +var ( + tryExecOnce sync.Once + tryExecOk bool +) + +// NeedsExec checks that the current system can start new processes +// using os.StartProcess or (more commonly) exec.Command. +// If not, NeedsExec calls t.Skip with an explanation. +func NeedsExec(t testing.TB) { + if !HasExec() { + t.Skipf("skipping test: cannot exec subprocess on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} + +// CommandContext is like exec.CommandContext, but: +// - skips t if the platform does not support os/exec, +// - if supported, sends SIGQUIT instead of SIGKILL in its Cancel function +// - if the test has a deadline, adds a Context timeout and (if supported) WaitDelay +// for an arbitrary grace period before the test's deadline expires, +// - if Cmd has the Cancel field, fails the test if the command is canceled +// due to the test's deadline, and +// - sets a Cleanup function that verifies that the test did not leak a subprocess. +func CommandContext(t testing.TB, ctx context.Context, name string, args ...string) *exec.Cmd { + t.Helper() + NeedsExec(t) + + var ( + cancelCtx context.CancelFunc + gracePeriod time.Duration // unlimited unless the test has a deadline (to allow for interactive debugging) + ) + + if td, ok := Deadline(t); ok { + // Start with a minimum grace period, just long enough to consume the + // output of a reasonable program after it terminates. + gracePeriod = 100 * time.Millisecond + if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" { + scale, err := strconv.Atoi(s) + if err != nil { + t.Fatalf("invalid GO_TEST_TIMEOUT_SCALE: %v", err) + } + gracePeriod *= time.Duration(scale) + } + + // If time allows, increase the termination grace period to 5% of the + // test's remaining time. 
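+			// For example, with 10 minutes of test time remaining, the
+			// grace period computed below is 10m/20 = 30s.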
+ testTimeout := time.Until(td) + if gp := testTimeout / 20; gp > gracePeriod { + gracePeriod = gp + } + + // When we run commands that execute subprocesses, we want to reserve two + // grace periods to clean up: one for the delay between the first + // termination signal being sent (via the Cancel callback when the Context + // expires) and the process being forcibly terminated (via the WaitDelay + // field), and a second one for the delay between the process being + // terminated and the test logging its output for debugging. + // + // (We want to ensure that the test process itself has enough time to + // log the output before it is also terminated.) + cmdTimeout := testTimeout - 2*gracePeriod + + if cd, ok := ctx.Deadline(); !ok || time.Until(cd) > cmdTimeout { + // Either ctx doesn't have a deadline, or its deadline would expire + // after (or too close before) the test has already timed out. + // Add a shorter timeout so that the test will produce useful output. + ctx, cancelCtx = context.WithTimeout(ctx, cmdTimeout) + } + } + + cmd := exec.CommandContext(ctx, name, args...) + + // Use reflection to set the Cancel and WaitDelay fields, if present. + // TODO(bcmills): When we no longer support Go versions below 1.20, + // remove the use of reflect and assume that the fields are always present. + rc := reflect.ValueOf(cmd).Elem() + + if rCancel := rc.FieldByName("Cancel"); rCancel.IsValid() { + rCancel.Set(reflect.ValueOf(func() error { + if cancelCtx != nil && ctx.Err() == context.DeadlineExceeded { + // The command timed out due to running too close to the test's deadline + // (because we specifically set a shorter Context deadline for that + // above). There is no way the test did that intentionally — it's too + // close to the wire! — so mark it as a test failure. That way, if the + // test expects the command to fail for some other reason, it doesn't + // have to distinguish between that reason and a timeout. + t.Errorf("test timed out while running command: %v", cmd) + } else { + // The command is being terminated due to ctx being canceled, but + // apparently not due to an explicit test deadline that we added. + // Log that information in case it is useful for diagnosing a failure, + // but don't actually fail the test because of it. + t.Logf("%v: terminating command: %v", ctx.Err(), cmd) + } + return cmd.Process.Signal(Sigquit) + })) + } + + if rWaitDelay := rc.FieldByName("WaitDelay"); rWaitDelay.IsValid() { + rWaitDelay.Set(reflect.ValueOf(gracePeriod)) + } + + t.Cleanup(func() { + if cancelCtx != nil { + cancelCtx() + } + if cmd.Process != nil && cmd.ProcessState == nil { + t.Errorf("command was started, but test did not wait for it to complete: %v", cmd) + } + }) + + return cmd +} + +// Command is like exec.Command, but applies the same changes as +// testenv.CommandContext (with a default Context). +func Command(t testing.TB, name string, args ...string) *exec.Cmd { + t.Helper() + return CommandContext(t, context.Background(), name, args...) +} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv.go b/vendor/golang.org/x/tools/internal/testenv/testenv.go new file mode 100644 index 00000000000..d4a17ce039a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/testenv.go @@ -0,0 +1,492 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// Package testenv contains helper functions for skipping tests
+// based on which tools are present in the environment.
+package testenv
+
+import (
+	"bytes"
+	"fmt"
+	"go/build"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"runtime/debug"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"golang.org/x/mod/modfile"
+	"golang.org/x/tools/internal/goroot"
+)
+
+// packageMainIsDevel reports whether the module containing package main
+// is a development version (if module information is available).
+func packageMainIsDevel() bool {
+	info, ok := debug.ReadBuildInfo()
+	if !ok {
+		// Most test binaries currently lack build info, but this should become more
+		// permissive once https://golang.org/issue/33976 is fixed.
+		return true
+	}
+
+	// Note: info.Main.Version describes the version of the module containing
+	// package main, not the version of “the main module”.
+	// See https://golang.org/issue/33975.
+	return info.Main.Version == "(devel)"
+}
+
+var checkGoBuild struct {
+	once sync.Once
+	err  error
+}
+
+// HasTool reports an error if the required tool is not available in PATH.
+//
+// For certain tools, it checks that the tool executable is correct.
+func HasTool(tool string) error {
+	if tool == "cgo" {
+		enabled, err := cgoEnabled(false)
+		if err != nil {
+			return fmt.Errorf("checking cgo: %v", err)
+		}
+		if !enabled {
+			return fmt.Errorf("cgo not enabled")
+		}
+		return nil
+	}
+
+	_, err := exec.LookPath(tool)
+	if err != nil {
+		return err
+	}
+
+	switch tool {
+	case "patch":
+		// Check that the patch tool supports the -o argument.
+		temp, err := os.CreateTemp("", "patch-test")
+		if err != nil {
+			return err
+		}
+		temp.Close()
+		defer os.Remove(temp.Name())
+		cmd := exec.Command(tool, "-o", temp.Name())
+		if err := cmd.Run(); err != nil {
+			return err
+		}
+
+	case "go":
+		checkGoBuild.once.Do(func() {
+			if runtime.GOROOT() != "" {
+				// Ensure that the 'go' command found by exec.LookPath is from the correct
+				// GOROOT. Otherwise, 'some/path/go test ./...' will test against some
+				// version of the 'go' binary other than 'some/path/go', which is almost
+				// certainly not what the user intended.
+				out, err := exec.Command(tool, "env", "GOROOT").Output()
+				if err != nil {
+					if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 {
+						err = fmt.Errorf("%w\nstderr:\n%s", err, exit.Stderr)
+					}
+					checkGoBuild.err = err
+					return
+				}
+				GOROOT := strings.TrimSpace(string(out))
+				if GOROOT != runtime.GOROOT() {
+					checkGoBuild.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT())
+					return
+				}
+			}
+
+			dir, err := os.MkdirTemp("", "testenv-*")
+			if err != nil {
+				checkGoBuild.err = err
+				return
+			}
+			defer os.RemoveAll(dir)
+
+			mainGo := filepath.Join(dir, "main.go")
+			if err := os.WriteFile(mainGo, []byte("package main\nfunc main() {}\n"), 0644); err != nil {
+				checkGoBuild.err = err
+				return
+			}
+			cmd := exec.Command("go", "build", "-o", os.DevNull, mainGo)
+			cmd.Dir = dir
+			if out, err := cmd.CombinedOutput(); err != nil {
+				if len(out) > 0 {
+					checkGoBuild.err = fmt.Errorf("%v: %v\n%s", cmd, err, out)
+				} else {
+					checkGoBuild.err = fmt.Errorf("%v: %v", cmd, err)
+				}
+			}
+		})
+		if checkGoBuild.err != nil {
+			return checkGoBuild.err
+		}
+
+	case "diff":
+		// Check that diff is the GNU version, needed for the -u argument and
+		// to report missing newlines at the end of files.
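+		// Matching on the "GNU diffutils" substring in the version banner
+		// below is a heuristic; non-GNU diff implementations generally do
+		// not print it.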
+ out, err := exec.Command(tool, "-version").Output() + if err != nil { + return err + } + if !bytes.Contains(out, []byte("GNU diffutils")) { + return fmt.Errorf("diff is not the GNU version") + } + } + + return nil +} + +func cgoEnabled(bypassEnvironment bool) (bool, error) { + cmd := exec.Command("go", "env", "CGO_ENABLED") + if bypassEnvironment { + cmd.Env = append(append([]string(nil), os.Environ()...), "CGO_ENABLED=") + } + out, err := cmd.Output() + if err != nil { + if exit, ok := err.(*exec.ExitError); ok && len(exit.Stderr) > 0 { + err = fmt.Errorf("%w\nstderr:\n%s", err, exit.Stderr) + } + return false, err + } + enabled := strings.TrimSpace(string(out)) + return enabled == "1", nil +} + +func allowMissingTool(tool string) bool { + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "illumos", "linux", "netbsd", "openbsd", "plan9", "solaris", "windows": + // Known non-mobile OS. Expect a reasonably complete environment. + default: + return true + } + + switch tool { + case "cgo": + if strings.HasSuffix(os.Getenv("GO_BUILDER_NAME"), "-nocgo") { + // Explicitly disabled on -nocgo builders. + return true + } + if enabled, err := cgoEnabled(true); err == nil && !enabled { + // No platform support. + return true + } + case "go": + if os.Getenv("GO_BUILDER_NAME") == "illumos-amd64-joyent" { + // Work around a misconfigured builder (see https://golang.org/issue/33950). + return true + } + case "diff": + if os.Getenv("GO_BUILDER_NAME") != "" { + return true + } + case "patch": + if os.Getenv("GO_BUILDER_NAME") != "" { + return true + } + } + + // If a developer is actively working on this test, we expect them to have all + // of its dependencies installed. However, if it's just a dependency of some + // other module (for example, being run via 'go test all'), we should be more + // tolerant of unusual environments. + return !packageMainIsDevel() +} + +// NeedsTool skips t if the named tool is not present in the path. +// As a special case, "cgo" means "go" is present and can compile cgo programs. +func NeedsTool(t testing.TB, tool string) { + err := HasTool(tool) + if err == nil { + return + } + + t.Helper() + if allowMissingTool(tool) { + // TODO(adonovan): if we skip because of (e.g.) + // mismatched go env GOROOT and runtime.GOROOT, don't + // we risk some users not getting the coverage they expect? + // bcmills notes: this shouldn't be a concern as of CL 404134 (Go 1.19). + // We could probably safely get rid of that GOPATH consistency + // check entirely at this point. + t.Skipf("skipping because %s tool not available: %v", tool, err) + } else { + t.Fatalf("%s tool not available: %v", tool, err) + } +} + +// NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by +// the current process environment is not present in the path. +func NeedsGoPackages(t testing.TB) { + t.Helper() + + tool := os.Getenv("GOPACKAGESDRIVER") + switch tool { + case "off": + // "off" forces go/packages to use the go command. + tool = "go" + case "": + if _, err := exec.LookPath("gopackagesdriver"); err == nil { + tool = "gopackagesdriver" + } else { + tool = "go" + } + } + + NeedsTool(t, tool) +} + +// NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied +// by env is not present in the path. 
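+//
+// For example (hypothetical values), a test that shells out with a custom
+// environment might guard itself with:
+//
+//	env := append(os.Environ(), "GOPACKAGESDRIVER=off")
+//	testenv.NeedsGoPackagesEnv(t, env) // "off" requires the 'go' tool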
+func NeedsGoPackagesEnv(t testing.TB, env []string) { + t.Helper() + + for _, v := range env { + if strings.HasPrefix(v, "GOPACKAGESDRIVER=") { + tool := strings.TrimPrefix(v, "GOPACKAGESDRIVER=") + if tool == "off" { + NeedsTool(t, "go") + } else { + NeedsTool(t, tool) + } + return + } + } + + NeedsGoPackages(t) +} + +// NeedsGoBuild skips t if the current system can't build programs with “go build” +// and then run them with os.StartProcess or exec.Command. +// Android doesn't have the userspace go build needs to run, +// and js/wasm doesn't support running subprocesses. +func NeedsGoBuild(t testing.TB) { + t.Helper() + + // This logic was derived from internal/testing.HasGoBuild and + // may need to be updated as that function evolves. + + NeedsTool(t, "go") +} + +// ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the +// current machine is a builder known to have scarce resources. +// +// It should be called from within a TestMain function. +func ExitIfSmallMachine() { + switch b := os.Getenv("GO_BUILDER_NAME"); b { + case "linux-arm-scaleway": + // "linux-arm" was renamed to "linux-arm-scaleway" in CL 303230. + fmt.Fprintln(os.Stderr, "skipping test: linux-arm-scaleway builder lacks sufficient memory (https://golang.org/issue/32834)") + case "plan9-arm": + fmt.Fprintln(os.Stderr, "skipping test: plan9-arm builder lacks sufficient memory (https://golang.org/issue/38772)") + case "netbsd-arm-bsiegert", "netbsd-arm64-bsiegert": + // As of 2021-06-02, these builders are running with GO_TEST_TIMEOUT_SCALE=10, + // and there is only one of each. We shouldn't waste those scarce resources + // running very slow tests. + fmt.Fprintf(os.Stderr, "skipping test: %s builder is very slow\n", b) + case "dragonfly-amd64": + // As of 2021-11-02, this builder is running with GO_TEST_TIMEOUT_SCALE=2, + // and seems to have unusually slow disk performance. + fmt.Fprintln(os.Stderr, "skipping test: dragonfly-amd64 has slow disk (https://golang.org/issue/45216)") + case "linux-riscv64-unmatched": + // As of 2021-11-03, this builder is empirically not fast enough to run + // gopls tests. Ideally we should make the tests faster in short mode + // and/or fix them to not assume arbitrary deadlines. + // For now, we'll skip them instead. + fmt.Fprintf(os.Stderr, "skipping test: %s builder is too slow (https://golang.org/issue/49321)\n", b) + default: + switch runtime.GOOS { + case "android", "ios": + fmt.Fprintf(os.Stderr, "skipping test: assuming that %s is resource-constrained\n", runtime.GOOS) + default: + return + } + } + os.Exit(0) +} + +// Go1Point returns the x in Go 1.x. +func Go1Point() int { + for i := len(build.Default.ReleaseTags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(build.Default.ReleaseTags[i], "go1.%d", &version); err != nil { + continue + } + return version + } + panic("bad release tags") +} + +// NeedsGo1Point skips t if the Go version used to run the test is older than +// 1.x. +func NeedsGo1Point(t testing.TB, x int) { + if Go1Point() < x { + t.Helper() + t.Skipf("running Go version %q is version 1.%d, older than required 1.%d", runtime.Version(), Go1Point(), x) + } +} + +// SkipAfterGo1Point skips t if the Go version used to run the test is newer than +// 1.x. 
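+//
+// For example, SkipAfterGo1Point(t, 21) skips the test when it runs under
+// Go 1.22 or newer.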
+func SkipAfterGo1Point(t testing.TB, x int) {
+	if Go1Point() > x {
+		t.Helper()
+		t.Skipf("running Go version %q is version 1.%d, newer than maximum 1.%d", runtime.Version(), Go1Point(), x)
+	}
+}
+
+// NeedsLocalhostNet skips t if networking does not work for ports opened
+// with "localhost".
+func NeedsLocalhostNet(t testing.TB) {
+	switch runtime.GOOS {
+	case "js", "wasip1":
+		t.Skipf(`Listening on "localhost" fails on %s; see https://go.dev/issue/59718`, runtime.GOOS)
+	}
+}
+
+// Deadline returns the deadline of t, if known,
+// using the Deadline method added in Go 1.15.
+func Deadline(t testing.TB) (time.Time, bool) {
+	td, ok := t.(interface {
+		Deadline() (time.Time, bool)
+	})
+	if !ok {
+		return time.Time{}, false
+	}
+	return td.Deadline()
+}
+
+// WriteImportcfg writes an importcfg file used by the compiler or linker to
+// dstPath containing entries for the packages in std and cmd in addition
+// to the package-to-package-file mappings in additionalPackageFiles.
+func WriteImportcfg(t testing.TB, dstPath string, additionalPackageFiles map[string]string) {
+	importcfg, err := goroot.Importcfg()
+	for k, v := range additionalPackageFiles {
+		importcfg += fmt.Sprintf("\npackagefile %s=%s", k, v)
+	}
+	if err != nil {
+		t.Fatalf("preparing the importcfg failed: %s", err)
+	}
+	if err := os.WriteFile(dstPath, []byte(importcfg), 0655); err != nil {
+		t.Fatalf("writing the importcfg failed: %s", err)
+	}
+}
+
+var (
+	gorootOnce sync.Once
+	gorootPath string
+	gorootErr  error
+)
+
+func findGOROOT() (string, error) {
+	gorootOnce.Do(func() {
+		gorootPath = runtime.GOROOT()
+		if gorootPath != "" {
+			// If runtime.GOROOT() is non-empty, assume that it is valid. (It might
+			// not be: for example, the user may have explicitly set GOROOT
+			// to the wrong directory.)
+			return
+		}
+
+		cmd := exec.Command("go", "env", "GOROOT")
+		out, err := cmd.Output()
+		if err != nil {
+			gorootErr = fmt.Errorf("%v: %v", cmd, err)
+		}
+		gorootPath = strings.TrimSpace(string(out))
+	})
+
+	return gorootPath, gorootErr
+}
+
+// GOROOT reports the path to the directory containing the root of the Go
+// project source tree. This is normally equivalent to runtime.GOROOT, but
+// works even if the test binary was built with -trimpath.
+//
+// If GOROOT cannot be found, GOROOT skips t if t is non-nil,
+// or panics otherwise.
+func GOROOT(t testing.TB) string {
+	path, err := findGOROOT()
+	if err != nil {
+		if t == nil {
+			panic(err)
+		}
+		t.Helper()
+		t.Skip(err)
+	}
+	return path
+}
+
+// NeedsLocalXTools skips t if the golang.org/x/tools module is replaced and
+// its replacement directory does not exist (or does not contain the module).
+func NeedsLocalXTools(t testing.TB) {
+	t.Helper()
+
+	NeedsTool(t, "go")
+
+	cmd := Command(t, "go", "list", "-f", "{{with .Replace}}{{.Dir}}{{end}}", "-m", "golang.org/x/tools")
+	out, err := cmd.Output()
+	if err != nil {
+		if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+			t.Skipf("skipping test: %v: %v\n%s", cmd, err, ee.Stderr)
+		}
+		t.Skipf("skipping test: %v: %v", cmd, err)
+	}
+
+	dir := string(bytes.TrimSpace(out))
+	if dir == "" {
+		// No replacement directory, and (since we didn't set -e) no error either.
+		// Maybe x/tools isn't replaced at all (as in a gopls release, or when
+		// using a go.work file that includes the x/tools module).
+		return
+	}
+
+	// We found the directory where x/tools would exist if we're in a clone of the
+	// repo. Is it there? (If not, we're probably in the module cache instead.)
+ modFilePath := filepath.Join(dir, "go.mod") + b, err := os.ReadFile(modFilePath) + if err != nil { + t.Skipf("skipping test: x/tools replacement not found: %v", err) + } + modulePath := modfile.ModulePath(b) + + if want := "golang.org/x/tools"; modulePath != want { + t.Skipf("skipping test: %s module path is %q, not %q", modFilePath, modulePath, want) + } +} + +// NeedsGoExperiment skips t if the current process environment does not +// have a GOEXPERIMENT flag set. +func NeedsGoExperiment(t testing.TB, flag string) { + t.Helper() + + goexp := os.Getenv("GOEXPERIMENT") + set := false + for _, f := range strings.Split(goexp, ",") { + if f == "" { + continue + } + if f == "none" { + // GOEXPERIMENT=none disables all experiment flags. + set = false + break + } + val := true + if strings.HasPrefix(f, "no") { + f, val = f[2:], false + } + if f == flag { + set = val + } + } + if !set { + t.Skipf("skipping test: flag %q is not set in GOEXPERIMENT=%q", flag, goexp) + } +} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go new file mode 100644 index 00000000000..e9ce0d3649d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_notunix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) +// +build !unix,!aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package testenv + +import "os" + +// Sigquit is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var Sigquit = os.Kill diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go new file mode 100644 index 00000000000..bc6af1ff81d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/testenv/testenv_unix.go @@ -0,0 +1,14 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris +// +build unix aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package testenv + +import "syscall" + +// Sigquit is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var Sigquit = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/txtar/archive.go b/vendor/golang.org/x/tools/txtar/archive.go new file mode 100644 index 00000000000..fd95f1e64a1 --- /dev/null +++ b/vendor/golang.org/x/tools/txtar/archive.go @@ -0,0 +1,140 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package txtar implements a trivial text-based file archive format. +// +// The goals for the format are: +// +// - be trivial enough to create and edit by hand. +// - be able to store trees of text files describing go command test cases. +// - diff nicely in git history and code reviews. +// +// Non-goals include being a completely general archive format, +// storing binary data, storing file modes, storing special files like +// symbolic links, and so on. 
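+//
+// As a small illustration (names and contents invented for this comment),
+// an archive with a comment and two files looks like:
+//
+//	This is the archive comment.
+//	-- hello.txt --
+//	Hello, world.
+//	-- sub/dir/main.go --
+//	package main
+//
+// The precise rules follow.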
+//
+// # Txtar format
+//
+// A txtar archive is zero or more comment lines and then a sequence of file entries.
+// Each file entry begins with a file marker line of the form "-- FILENAME --"
+// and is followed by zero or more file content lines making up the file data.
+// The comment or file content ends at the next file marker line.
+// The file marker line must begin with the three-byte sequence "-- "
+// and end with the three-byte sequence " --", but the enclosed
+// file name can be surrounded by additional white space,
+// all of which is stripped.
+//
+// If the txtar file is missing a trailing newline on the final line,
+// parsers should consider a final newline to be present anyway.
+//
+// There are no possible syntax errors in a txtar archive.
+package txtar
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strings"
+)
+
+// An Archive is a collection of files.
+type Archive struct {
+	Comment []byte
+	Files   []File
+}
+
+// A File is a single file in an archive.
+type File struct {
+	Name string // name of file ("foo/bar.txt")
+	Data []byte // text content of file
+}
+
+// Format returns the serialized form of an Archive.
+// It is assumed that the Archive data structure is well-formed:
+// a.Comment and all a.File[i].Data contain no file marker lines,
+// and every a.File[i].Name is non-empty.
+func Format(a *Archive) []byte {
+	var buf bytes.Buffer
+	buf.Write(fixNL(a.Comment))
+	for _, f := range a.Files {
+		fmt.Fprintf(&buf, "-- %s --\n", f.Name)
+		buf.Write(fixNL(f.Data))
+	}
+	return buf.Bytes()
+}
+
+// ParseFile parses the named file as an archive.
+func ParseFile(file string) (*Archive, error) {
+	data, err := os.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(data), nil
+}
+
+// Parse parses the serialized form of an Archive.
+// The returned Archive holds slices of data.
+func Parse(data []byte) *Archive {
+	a := new(Archive)
+	var name string
+	a.Comment, name, data = findFileMarker(data)
+	for name != "" {
+		f := File{name, nil}
+		f.Data, name, data = findFileMarker(data)
+		a.Files = append(a.Files, f)
+	}
+	return a
+}
+
+var (
+	newlineMarker = []byte("\n-- ")
+	marker        = []byte("-- ")
+	markerEnd     = []byte(" --")
+)
+
+// findFileMarker finds the next file marker in data,
+// extracts the file name, and returns the data before the marker,
+// the file name, and the data after the marker.
+// If there is no next marker, findFileMarker returns before = fixNL(data), name = "", after = nil.
+func findFileMarker(data []byte) (before []byte, name string, after []byte) {
+	var i int
+	for {
+		if name, after = isMarker(data[i:]); name != "" {
+			return data[:i], name, after
+		}
+		j := bytes.Index(data[i:], newlineMarker)
+		if j < 0 {
+			return fixNL(data), "", nil
+		}
+		i += j + 1 // positioned at start of new possible marker
+	}
+}
+
+// isMarker checks whether data begins with a file marker line.
+// If so, it returns the name from the line and the data after the line.
+// Otherwise it returns name == "" with an unspecified after.
+func isMarker(data []byte) (name string, after []byte) {
+	if !bytes.HasPrefix(data, marker) {
+		return "", nil
+	}
+	if i := bytes.IndexByte(data, '\n'); i >= 0 {
+		data, after = data[:i], data[i+1:]
+	}
+	if !(bytes.HasSuffix(data, markerEnd) && len(data) >= len(marker)+len(markerEnd)) {
+		return "", nil
+	}
+	return strings.TrimSpace(string(data[len(marker) : len(data)-len(markerEnd)])), after
+}
+
+// If data is empty or ends in \n, fixNL returns data.
+// Otherwise fixNL returns a new slice consisting of data with a final \n added. +func fixNL(data []byte) []byte { + if len(data) == 0 || data[len(data)-1] == '\n' { + return data + } + d := make([]byte, len(data)+1) + copy(d, data) + d[len(data)] = '\n' + return d +} diff --git a/vendor/golang.org/x/tools/txtar/fs.go b/vendor/golang.org/x/tools/txtar/fs.go new file mode 100644 index 00000000000..e37397e7b71 --- /dev/null +++ b/vendor/golang.org/x/tools/txtar/fs.go @@ -0,0 +1,259 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package txtar + +import ( + "errors" + "fmt" + "io" + "io/fs" + "path" + "time" +) + +// FS returns the file system form of an Archive. +// It returns an error if any of the file names in the archive +// are not valid file system names. +// The archive must not be modified while the FS is in use. +// +// If the file system detects that it has been modified, calls to the +// file system return an ErrModified error. +func FS(a *Archive) (fs.FS, error) { + // Create a filesystem with a root directory. + root := &node{fileinfo: fileinfo{path: ".", mode: readOnlyDir}} + fsys := &filesystem{a, map[string]*node{root.path: root}} + + if err := initFiles(fsys); err != nil { + return nil, fmt.Errorf("cannot create fs.FS from txtar.Archive: %s", err) + } + return fsys, nil +} + +const ( + readOnly fs.FileMode = 0o444 // read only mode + readOnlyDir = readOnly | fs.ModeDir +) + +// ErrModified indicates that file system returned by FS +// noticed that the underlying archive has been modified +// since the call to FS. Detection of modification is best effort, +// to help diagnose misuse of the API, and is not guaranteed. +var ErrModified error = errors.New("txtar.Archive has been modified during txtar.FS") + +// A filesystem is a simple in-memory file system for txtar archives, +// represented as a map from valid path names to information about the +// files or directories they represent. +// +// File system operations are read only. Modifications to the underlying +// *Archive may race. To help prevent this, the filesystem tries +// to detect modification during Open and return ErrModified if it +// is able to detect a modification. +type filesystem struct { + ar *Archive + nodes map[string]*node +} + +// node is a file or directory in the tree of a filesystem. +type node struct { + fileinfo // fs.FileInfo and fs.DirEntry implementation + idx int // index into ar.Files (for files) + entries []fs.DirEntry // subdirectories and files (for directories) +} + +var _ fs.FS = (*filesystem)(nil) +var _ fs.DirEntry = (*node)(nil) + +// initFiles initializes fsys from fsys.ar.Files. Returns an error if there are any +// invalid file names or collisions between file or directories. +func initFiles(fsys *filesystem) error { + for idx, file := range fsys.ar.Files { + name := file.Name + if !fs.ValidPath(name) { + return fmt.Errorf("file %q is an invalid path", name) + } + + n := &node{idx: idx, fileinfo: fileinfo{path: name, size: len(file.Data), mode: readOnly}} + if err := insert(fsys, n); err != nil { + return err + } + } + return nil +} + +// insert adds node n as an entry to its parent directory within the filesystem. +func insert(fsys *filesystem, n *node) error { + if m := fsys.nodes[n.path]; m != nil { + return fmt.Errorf("duplicate path %q", n.path) + } + fsys.nodes[n.path] = n + + // fsys.nodes contains "." to prevent infinite loops. 
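+	// (directory below may call back into insert for a missing parent,
+	// climbing one path element at a time until it reaches the pre-created
+	// root node ".".)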
+ parent, err := directory(fsys, path.Dir(n.path)) + if err != nil { + return err + } + parent.entries = append(parent.entries, n) + return nil +} + +// directory returns the directory node with the path dir and lazily-creates it +// if it does not exist. +func directory(fsys *filesystem, dir string) (*node, error) { + if m := fsys.nodes[dir]; m != nil && m.IsDir() { + return m, nil // pre-existing directory + } + + n := &node{fileinfo: fileinfo{path: dir, mode: readOnlyDir}} + if err := insert(fsys, n); err != nil { + return nil, err + } + return n, nil +} + +// dataOf returns the data associated with the file t. +// May return ErrModified if fsys.ar has been modified. +func dataOf(fsys *filesystem, n *node) ([]byte, error) { + if n.idx >= len(fsys.ar.Files) { + return nil, ErrModified + } + + f := fsys.ar.Files[n.idx] + if f.Name != n.path || len(f.Data) != n.size { + return nil, ErrModified + } + return f.Data, nil +} + +func (fsys *filesystem) Open(name string) (fs.File, error) { + if !fs.ValidPath(name) { + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrInvalid} + } + + n := fsys.nodes[name] + switch { + case n == nil: + return nil, &fs.PathError{Op: "open", Path: name, Err: fs.ErrNotExist} + case n.IsDir(): + return &openDir{fileinfo: n.fileinfo, entries: n.entries}, nil + default: + data, err := dataOf(fsys, n) + if err != nil { + return nil, err + } + return &openFile{fileinfo: n.fileinfo, data: data}, nil + } +} + +func (fsys *filesystem) ReadFile(name string) ([]byte, error) { + file, err := fsys.Open(name) + if err != nil { + return nil, err + } + if file, ok := file.(*openFile); ok { + // TODO: use slices.Clone once x/tools has 1.21 available. + cp := make([]byte, file.size) + copy(cp, file.data) + return cp, err + } + return nil, &fs.PathError{Op: "read", Path: name, Err: fs.ErrInvalid} +} + +// A fileinfo implements fs.FileInfo and fs.DirEntry for a given archive file. +type fileinfo struct { + path string // unique path to the file or directory within a filesystem + size int + mode fs.FileMode +} + +var _ fs.FileInfo = (*fileinfo)(nil) +var _ fs.DirEntry = (*fileinfo)(nil) + +func (i *fileinfo) Name() string { return path.Base(i.path) } +func (i *fileinfo) Size() int64 { return int64(i.size) } +func (i *fileinfo) Mode() fs.FileMode { return i.mode } +func (i *fileinfo) Type() fs.FileMode { return i.mode.Type() } +func (i *fileinfo) ModTime() time.Time { return time.Time{} } +func (i *fileinfo) IsDir() bool { return i.mode&fs.ModeDir != 0 } +func (i *fileinfo) Sys() any { return nil } +func (i *fileinfo) Info() (fs.FileInfo, error) { return i, nil } + +// An openFile is a regular (non-directory) fs.File open for reading. 
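+// It serves Read, Seek, and ReadAt from the in-memory data slice, so reads
+// never touch the real file system.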
+type openFile struct { + fileinfo + data []byte + offset int64 +} + +var _ fs.File = (*openFile)(nil) + +func (f *openFile) Stat() (fs.FileInfo, error) { return &f.fileinfo, nil } +func (f *openFile) Close() error { return nil } +func (f *openFile) Read(b []byte) (int, error) { + if f.offset >= int64(len(f.data)) { + return 0, io.EOF + } + if f.offset < 0 { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.data[f.offset:]) + f.offset += int64(n) + return n, nil +} + +func (f *openFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + // offset += 0 + case 1: + offset += f.offset + case 2: + offset += int64(len(f.data)) + } + if offset < 0 || offset > int64(len(f.data)) { + return 0, &fs.PathError{Op: "seek", Path: f.path, Err: fs.ErrInvalid} + } + f.offset = offset + return offset, nil +} + +func (f *openFile) ReadAt(b []byte, offset int64) (int, error) { + if offset < 0 || offset > int64(len(f.data)) { + return 0, &fs.PathError{Op: "read", Path: f.path, Err: fs.ErrInvalid} + } + n := copy(b, f.data[offset:]) + if n < len(b) { + return n, io.EOF + } + return n, nil +} + +// A openDir is a directory fs.File (so also an fs.ReadDirFile) open for reading. +type openDir struct { + fileinfo + entries []fs.DirEntry + offset int +} + +var _ fs.ReadDirFile = (*openDir)(nil) + +func (d *openDir) Stat() (fs.FileInfo, error) { return &d.fileinfo, nil } +func (d *openDir) Close() error { return nil } +func (d *openDir) Read(b []byte) (int, error) { + return 0, &fs.PathError{Op: "read", Path: d.path, Err: fs.ErrInvalid} +} + +func (d *openDir) ReadDir(count int) ([]fs.DirEntry, error) { + n := len(d.entries) - d.offset + if n == 0 && count > 0 { + return nil, io.EOF + } + if count > 0 && n > count { + n = count + } + list := make([]fs.DirEntry, n) + copy(list, d.entries[d.offset:d.offset+n]) + d.offset += n + return list, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 897a583d5da..0c114d4e31d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -905,6 +905,22 @@ github.com/russross/blackfriday/v2 # github.com/schollz/peerdiscovery v1.7.0 ## explicit; go 1.13 github.com/schollz/peerdiscovery +# github.com/segmentio/asm v1.1.3 +## explicit; go 1.17 +github.com/segmentio/asm/ascii +github.com/segmentio/asm/base64 +github.com/segmentio/asm/cpu +github.com/segmentio/asm/cpu/arm +github.com/segmentio/asm/cpu/arm64 +github.com/segmentio/asm/cpu/cpuid +github.com/segmentio/asm/cpu/x86 +github.com/segmentio/asm/internal/unsafebytes +github.com/segmentio/asm/keyset +# github.com/segmentio/encoding v0.3.4 +## explicit; go 1.14 +github.com/segmentio/encoding/ascii +github.com/segmentio/encoding/iso8601 +github.com/segmentio/encoding/json # github.com/shirou/gopsutil v3.21.11+incompatible ## explicit github.com/shirou/gopsutil/cpu @@ -1145,6 +1161,18 @@ github.com/zenthangplus/goccm # go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt +# go.lsp.dev/jsonrpc2 v0.10.0 +## explicit; go 1.17 +go.lsp.dev/jsonrpc2 +# go.lsp.dev/pkg v0.0.0-20210717090340-384b27a52fb2 +## explicit; go 1.15 +go.lsp.dev/pkg/xcontext +# go.lsp.dev/protocol v0.12.0 +## explicit; go 1.17 +go.lsp.dev/protocol +# go.lsp.dev/uri v0.3.0 +## explicit; go 1.13 +go.lsp.dev/uri # go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic @@ -1290,6 +1318,13 @@ 
golang.org/x/time/rate ## explicit; go 1.19 golang.org/x/tools/cmd/goimports golang.org/x/tools/cover +golang.org/x/tools/go/analysis +golang.org/x/tools/go/analysis/analysistest +golang.org/x/tools/go/analysis/internal/analysisflags +golang.org/x/tools/go/analysis/internal/checker +golang.org/x/tools/go/analysis/passes/inspect +golang.org/x/tools/go/analysis/singlechecker +golang.org/x/tools/go/analysis/unitchecker golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata @@ -1297,20 +1332,28 @@ golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/imports golang.org/x/tools/internal/aliases +golang.org/x/tools/internal/analysisinternal +golang.org/x/tools/internal/diff +golang.org/x/tools/internal/diff/lcs golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label +golang.org/x/tools/internal/facts golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/goroot golang.org/x/tools/internal/imports golang.org/x/tools/internal/packagesinternal golang.org/x/tools/internal/pkgbits +golang.org/x/tools/internal/robustio golang.org/x/tools/internal/stdlib +golang.org/x/tools/internal/testenv golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions +golang.org/x/tools/txtar # google.golang.org/protobuf v1.34.2 ## explicit; go 1.20 google.golang.org/protobuf/cmd/protoc-gen-go diff --git a/waku/v1/peer.go b/waku/v1/peer.go index 196d0e0ace4..9882e62d7cb 100644 --- a/waku/v1/peer.go +++ b/waku/v1/peer.go @@ -481,6 +481,8 @@ func (p *Peer) handshake() error { // update executes periodic operations on the peer, including message transmission // and expiration. func (p *Peer) update() { + defer gocommon.LogOnPanic() + // Start the tickers for the updates expire := time.NewTicker(common.ExpirationCycle) transmit := time.NewTicker(common.TransmissionCycle) diff --git a/wakuv2/waku.go b/wakuv2/waku.go index 43c0626bc91..d1f6b7279bc 100644 --- a/wakuv2/waku.go +++ b/wakuv2/waku.go @@ -196,7 +196,10 @@ func (w *Waku) SetStatusTelemetryClient(client ITelemetryClient) { func newTTLCache() *ttlcache.Cache[gethcommon.Hash, *common.ReceivedMessage] { cache := ttlcache.New[gethcommon.Hash, *common.ReceivedMessage](ttlcache.WithTTL[gethcommon.Hash, *common.ReceivedMessage](cacheTTL)) - go cache.Start() + go func() { + defer gocommon.LogOnPanic() + cache.Start() + }() return cache } @@ -1219,7 +1222,10 @@ func (w *Waku) Start() error { w.wg.Add(1) go w.broadcast() - go w.sendQueue.Start(w.ctx) + go func() { + defer gocommon.LogOnPanic() + w.sendQueue.Start(w.ctx) + }() err = w.startMessageSender() if err != nil { @@ -1718,7 +1724,10 @@ func (w *Waku) ConnectionChanged(state connection.State) { isOnline := !state.Offline if w.cfg.LightClient { //TODO: Update this as per https://github.com/waku-org/go-waku/issues/1114 - go w.filterManager.OnConnectionStatusChange("", isOnline) + go func() { + defer gocommon.LogOnPanic() + w.filterManager.OnConnectionStatusChange("", isOnline) + }() w.handleNetworkChangeFromApp(state) } else { // for lightClient state update and onlineChange is handled in filterManager. 
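The wrappers above all follow one pattern: a bare `go` statement is replaced by a closure whose first statement defers a panic logger, so a panicking goroutine is recorded before it unwinds. A minimal sketch of such a helper, assuming only the `LogOnPanic` name from the diff (the package name and logging details here are illustrative, not status-go's actual implementation):

```go
package gocommon

import "log"

// LogOnPanic is intended to be deferred as the first statement of a
// goroutine. It logs the recovered value, then re-panics so the crash
// still surfaces instead of being silently swallowed.
func LogOnPanic() {
	if r := recover(); r != nil {
		log.Printf("panic in goroutine: %v", r)
		panic(r)
	}
}
```

Used exactly as in the hunks above: `go func() { defer gocommon.LogOnPanic(); cache.Start() }()`.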
From c72f491324909173c83e69c5a86ace7aeb87daaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Soko=C5=82owski?= Date: Wed, 23 Oct 2024 14:16:46 +0200 Subject: [PATCH 5/7] fix(ci)_: remove workspace and tmp dir MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This ensures we do not encounter weird errors like: ``` + ln -s /home/jenkins/workspace/go_prs_linux_x86_64_main_PR-5907 /home/jenkins/workspace/go_prs_linux_x86_64_main_PR-5907@tmp/go/src/github.com/status-im/status-go ln: failed to create symbolic link '/home/jenkins/workspace/go_prs_linux_x86_64_main_PR-5907@tmp/go/src/github.com/status-im/status-go': File exists script returned exit code 1 ``` Signed-off-by: Jakub Sokołowski --- _assets/ci/Jenkinsfile.android | 5 ++++- _assets/ci/Jenkinsfile.ios | 5 ++++- _assets/ci/Jenkinsfile.linux | 5 ++++- _assets/ci/Jenkinsfile.tests | 6 +++--- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/_assets/ci/Jenkinsfile.android b/_assets/ci/Jenkinsfile.android index ef13cfaffbc..ed80f3822a5 100644 --- a/_assets/ci/Jenkinsfile.android +++ b/_assets/ci/Jenkinsfile.android @@ -85,6 +85,9 @@ pipeline { post { success { script { github.notifyPR(true) } } failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } + cleanup { + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } } // post } // pipeline diff --git a/_assets/ci/Jenkinsfile.ios b/_assets/ci/Jenkinsfile.ios index 935b4e12243..90dbc3d48e4 100644 --- a/_assets/ci/Jenkinsfile.ios +++ b/_assets/ci/Jenkinsfile.ios @@ -89,6 +89,9 @@ pipeline { post { success { script { github.notifyPR(true) } } failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } + cleanup { + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } } // post } // pipeline diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux index 1e76096b868..869b26b28f4 100644 --- a/_assets/ci/Jenkinsfile.linux +++ b/_assets/ci/Jenkinsfile.linux @@ -92,6 +92,9 @@ pipeline { post { success { script { github.notifyPR(true) } } failure { script { github.notifyPR(false) } } - cleanup { sh 'make deep-clean' } + cleanup { + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } } // post } // pipeline diff --git a/_assets/ci/Jenkinsfile.tests b/_assets/ci/Jenkinsfile.tests index 627f4e71c81..499df4b08d5 100644 --- a/_assets/ci/Jenkinsfile.tests +++ b/_assets/ci/Jenkinsfile.tests @@ -238,8 +238,8 @@ pipeline { } } cleanup { - dir(env.TMPDIR) { deleteDir() } - sh "make git-clean" + cleanWs() + dir("${env.WORKSPACE}@tmp") { deleteDir() } } } // post } // pipeline @@ -254,4 +254,4 @@ def getDefaultUnitTestCount() { isNightlyJob() ? '20' : '1' } def getDefaultTimeout() { isNightlyJob() ? 5*60 : 50 } -def getAmountToKeep() { isNightlyJob() ? '14' : isDevelopJob() ? '10' : '5' } \ No newline at end of file +def getAmountToKeep() { isNightlyJob() ? '14' : isDevelopJob() ? '10' : '5' } From 6ee62061bc6fdab05a448f6af6c0729211d341fc Mon Sep 17 00:00:00 2001 From: Vedran <82972815+mendelskiv93@users.noreply.github.com> Date: Fri, 25 Oct 2024 09:40:47 +0200 Subject: [PATCH 6/7] chore_: enable windows and macos CI build (#5840) - Added support for Windows and macOS in CI pipelines - Added missing dependencies for Windows and x86-64-darwin - Resolved macOS SDK version compatibility for darwin-x86_64 The `mkShell` override was necessary to ensure compatibility with the newer macOS SDK (version 11.0) for x86_64. 
The default SDK (10.12) was causing build failures because of the missing libs and frameworks. OverrideSDK creates a mapping from the default SDK in all package categories to the requested SDK (11.0). --- Makefile | 5 + _assets/ci/Jenkinsfile | 8 +- _assets/ci/Jenkinsfile.desktop | 166 +++++++++++++++++++++++++++++++ _assets/ci/Jenkinsfile.linux | 101 +------------------ _assets/ci/Jenkinsfile.macos | 1 + _assets/ci/Jenkinsfile.windows | 1 + nix/pkgs/codecov-cli/default.nix | 1 + nix/shell.nix | 7 +- 8 files changed, 188 insertions(+), 102 deletions(-) create mode 100644 _assets/ci/Jenkinsfile.desktop mode change 100644 => 120000 _assets/ci/Jenkinsfile.linux create mode 120000 _assets/ci/Jenkinsfile.macos create mode 120000 _assets/ci/Jenkinsfile.windows diff --git a/Makefile b/Makefile index 0d0916ee48e..4149ccba87c 100644 --- a/Makefile +++ b/Makefile @@ -193,6 +193,11 @@ statusgo-cross: statusgo-android statusgo-ios @echo "Full cross compilation done." @ls -ld build/bin/statusgo-* +status-go-deps: + go install go.uber.org/mock/mockgen@v0.4.0 + go install github.com/kevinburke/go-bindata/v4/...@v4.0.2 + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.1 + statusgo-android: generate statusgo-android: ##@cross-compile Build status-go for Android @echo "Building status-go for Android..." diff --git a/_assets/ci/Jenkinsfile b/_assets/ci/Jenkinsfile index ed66572c3e0..ab3fa726d18 100644 --- a/_assets/ci/Jenkinsfile +++ b/_assets/ci/Jenkinsfile @@ -1,5 +1,5 @@ #!/usr/bin/env groovy -library 'status-jenkins-lib@v1.9.6' +library 'status-jenkins-lib@v1.9.12' pipeline { agent { label 'linux' } @@ -52,6 +52,12 @@ pipeline { stage('Linux') { steps { script { linux = jenkins.Build('status-go/platforms/linux') } } } + stage('MacOS') { steps { script { + linux = jenkins.Build('status-go/platforms/macos') + } } } + stage('Windows') { steps { script { + linux = jenkins.Build('status-go/platforms/windows') + } } } stage('Docker') { steps { script { dock = jenkins.Build('status-go/platforms/docker') } } } diff --git a/_assets/ci/Jenkinsfile.desktop b/_assets/ci/Jenkinsfile.desktop new file mode 100644 index 00000000000..c7e460c8468 --- /dev/null +++ b/_assets/ci/Jenkinsfile.desktop @@ -0,0 +1,166 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.9.12' + +pipeline { + /* This way we run the same Jenkinsfile on different platforms. */ + agent { label "${params.AGENT_LABEL}" } + + parameters { + string( + name: 'BRANCH', + defaultValue: 'develop', + description: 'Name of branch to build.' 
+      )
+      string(
+        name: 'AGENT_LABEL',
+        description: 'Label for targeted CI slave host.',
+        defaultValue: params.AGENT_LABEL ?: getAgentLabel(),
+      )
+      booleanParam(
+        name: 'RELEASE',
+        defaultValue: false,
+        description: 'Enable to create build for release.',
+      )
+  }
+
+  options {
+    timestamps()
+    ansiColor('xterm')
+    /* Prevent Jenkins jobs from running forever */
+    timeout(time: 15, unit: 'MINUTES')
+    disableConcurrentBuilds()
+    /* manage how many builds we keep */
+    buildDiscarder(logRotator(
+      numToKeepStr: '5',
+      daysToKeepStr: '30',
+      artifactNumToKeepStr: '1',
+    ))
+  }
+
+  environment {
+    PLATFORM = getPlatformFromLabel(params.AGENT_LABEL)
+    TMPDIR = "${WORKSPACE_TMP}"
+    GOPATH = "${WORKSPACE_TMP}/go"
+    GOCACHE = "${WORKSPACE_TMP}/gocache"
+    PATH = "${PATH}:${GOPATH}/bin:/c/Users/jenkins/go/bin"
+    REPO_SRC = "${GOPATH}/src/github.com/status-im/status-go"
+    VERSION = sh(script: "./_assets/scripts/version.sh", returnStdout: true)
+    ARTIFACT = utils.pkgFilename(
+      name: 'status-go',
+      type: env.PLATFORM,
+      version: env.VERSION,
+      ext: 'zip',
+    )
+    /* prevent sharing cache dir across different jobs */
+    GO_GENERATE_FAST_DIR = "${env.WORKSPACE_TMP}/go-generate-fast"
+  }
+
+  stages {
+    stage('Setup') {
+      steps {
+        script {
+          if (env.PLATFORM != 'windows') {
+            sh "mkdir -p \$(dirname ${REPO_SRC})"
+            sh "ln -s ${WORKSPACE} ${REPO_SRC}"
+          }
+        }
+      }
+    }
+
+    stage('Deps') {
+      steps { script {
+        shell('make status-go-deps')
+      }
+    }
+  }
+
+    stage('Generate') {
+      steps { script {
+        shell('make generate')
+      }
+    }
+  }
+
+    stage('Build Static Lib') {
+      steps {
+        script {
+          shell('make statusgo-library')
+        }
+      }
+    }
+
+    stage('Build Shared Lib') {
+      steps {
+        script {
+          shell('make statusgo-shared-library')
+        }
+      }
+    }
+
+    stage('Archive') {
+      steps {
+        zip zipFile: "${ARTIFACT}", archive: true, dir: 'build/bin'
+      }
+    }
+
+    stage('Upload') {
+      steps {
+        script {
+          env.PKG_URL = s5cmd.upload(ARTIFACT)
+        }
+      }
+    }
+    stage('Cleanup') {
+      steps {
+        script {
+          cleanTmp()
+        }
+      }
+    }
+} // stages
+  post {
+    success { script { github.notifyPR(true) } }
+    failure { script { github.notifyPR(false) } }
+    cleanup { cleanWs() }
+  } // post
+} // pipeline
+
+/* This allows us to use one Jenkinsfile and run
+ * jobs on different platforms based on job name. */
+def getAgentLabel() {
+  if (params.AGENT_LABEL) { return params.AGENT_LABEL }
+  /* We extract the name of the job from currentThread because
+   * before an agent is picked, env is not available. */
+  def tokens = Thread.currentThread().getName().split('/')
+  def labels = []
+  /* Check if the job path contains any of the valid labels.
*/ + ['linux', 'macos', 'windows', 'x86_64', 'aarch64', 'arm64'].each { + if (tokens.contains(it)) { labels.add(it) } + } + return labels.join(' && ') +} + +/* This function extracts the platform from the AGENT_LABEL */ +def getPlatformFromLabel(label) { + for (platform in ['linux', 'macos', 'windows']) { + if (label.contains(platform)) { + return platform + } + } +} + +def shell(cmd) { + if (env.PLATFORM == 'windows') { + sh "${cmd} SHELL=/bin/sh" + } else { + nix.shell(cmd, pure: false) // Use nix.shell for Linux/macOS + } +} + +def cleanTmp() { + if (env.PLATFORM == 'windows') { + sh "rm -rf ${env.WORKSPACE}@tmp" + } else { + dir("${env.WORKSPACE}@tmp") { deleteDir() } + } +} \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux deleted file mode 100644 index 869b26b28f4..00000000000 --- a/_assets/ci/Jenkinsfile.linux +++ /dev/null @@ -1,100 +0,0 @@ -#!/usr/bin/env groovy -library 'status-jenkins-lib@v1.9.6' - -pipeline { - agent { label 'linux && x86_64 && nix-2.19' } - - parameters { - string( - name: 'BRANCH', - defaultValue: 'develop', - description: 'Name of branch to build.' - ) - booleanParam( - name: 'RELEASE', - defaultValue: false, - description: 'Enable to create build for release.', - ) - } - - options { - timestamps() - ansiColor('xterm') - /* Prevent Jenkins jobs from running forever */ - timeout(time: 10, unit: 'MINUTES') - disableConcurrentBuilds() - /* manage how many builds we keep */ - buildDiscarder(logRotator( - numToKeepStr: '5', - daysToKeepStr: '30', - artifactNumToKeepStr: '1', - )) - } - - environment { - PLATFORM = 'linux' - TMPDIR = "${WORKSPACE_TMP}" - GOPATH = "${WORKSPACE_TMP}/go" - GOCACHE = "${WORKSPACE_TMP}/gocache" - PATH = "${PATH}:${GOPATH}/bin" - REPO_SRC = "${GOPATH}/src/github.com/status-im/status-go" - VERSION = sh(script: "./_assets/scripts/version.sh", returnStdout: true) - ARTIFACT = utils.pkgFilename( - name: 'status-go', - type: env.PLATFORM, - version: env.VERSION, - ext: 'zip', - ) - /* prevent sharing cache dir across different jobs */ - GO_GENERATE_FAST_DIR = "${env.WORKSPACE_TMP}/go-generate-fast" - } - - stages { - stage('Setup') { - steps { /* Go needs to find status-go in GOPATH. 
*/ - sh "mkdir -p \$(dirname ${REPO_SRC})" - sh "ln -s ${WORKSPACE} ${REPO_SRC}" - } - } - - stage('Generate') { - steps { script { - nix.shell('make generate', pure: false) - } } - } - - /* Sanity-check C bindings */ - stage('Build Static Lib') { - steps { script { - nix.shell('make statusgo-library', pure: false) - } } - } - - stage('Build Shared Lib') { - steps { script { - nix.shell('make statusgo-shared-library', pure: false) - } } - } - - stage('Archive') { - steps { - sh "zip -q -r ${ARTIFACT} build/bin" - archiveArtifacts(ARTIFACT) - } - } - - stage('Upload') { - steps { script { - env.PKG_URL = s5cmd.upload(ARTIFACT) - } } - } - } // stages - post { - success { script { github.notifyPR(true) } } - failure { script { github.notifyPR(false) } } - cleanup { - cleanWs() - dir("${env.WORKSPACE}@tmp") { deleteDir() } - } - } // post -} // pipeline diff --git a/_assets/ci/Jenkinsfile.linux b/_assets/ci/Jenkinsfile.linux new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.linux @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.macos b/_assets/ci/Jenkinsfile.macos new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.macos @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/_assets/ci/Jenkinsfile.windows b/_assets/ci/Jenkinsfile.windows new file mode 120000 index 00000000000..6047babb30e --- /dev/null +++ b/_assets/ci/Jenkinsfile.windows @@ -0,0 +1 @@ +Jenkinsfile.desktop \ No newline at end of file diff --git a/nix/pkgs/codecov-cli/default.nix b/nix/pkgs/codecov-cli/default.nix index 12fbf83e01b..b342a1561e2 100644 --- a/nix/pkgs/codecov-cli/default.nix +++ b/nix/pkgs/codecov-cli/default.nix @@ -16,6 +16,7 @@ in stdenv.mkDerivation rec { url = "https://cli.codecov.io/v${version}/${platform}/codecov"; hash = lib.getAttr builtins.currentSystem { aarch64-darwin = "sha256-CB1D8/zYF23Jes9sd6rJiadDg7nwwee9xWSYqSByAlU="; + x86_64-darwin = "sha256-CB1D8/zYF23Jes9sd6rJiadDg7nwwee9xWSYqSByAlU="; x86_64-linux = "sha256-65AgCcuAD977zikcE1eVP4Dik4L0PHqYzOO1fStNjOw="; aarch64-linux = "sha256-hALtVSXY40uTIaAtwWr7EXh7zclhK63r7a341Tn+q/g="; }; diff --git a/nix/shell.nix b/nix/shell.nix index 10f2647c77e..5a29bac6aa3 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -16,7 +16,12 @@ let inherit xcodeWrapper; withAndroidPkgs = !isMacM1; }; -in pkgs.mkShell { + /* Override the default SDK to enable darwin-x86_64 builds */ + appleSdk11Stdenv = pkgs.overrideSDK pkgs.stdenv "11.0"; + sdk11mkShell = pkgs.mkShell.override { stdenv = appleSdk11Stdenv; }; + mkShell = if stdenv.isDarwin then sdk11mkShell else pkgs.mkShell; + +in mkShell { name = "status-go-shell"; buildInputs = with pkgs; [ From d99fdf1b672c2f8207371e7e2386a403196341b9 Mon Sep 17 00:00:00 2001 From: Jonathan Rainville Date: Fri, 25 Oct 2024 17:28:58 -0400 Subject: [PATCH 7/7] fix(contacts)_: fix trust status not being saved to cache when changed (#5965) Fixes https://github.com/status-im/status-desktop/issues/16392 --- protocol/messenger_contact_verification.go | 30 ++++++++---- .../messenger_contact_verification_test.go | 47 +++++++++++++++++++ 2 files changed, 68 insertions(+), 9 deletions(-) diff --git a/protocol/messenger_contact_verification.go b/protocol/messenger_contact_verification.go index b0d3328b69e..af6c6c8fa3d 100644 --- a/protocol/messenger_contact_verification.go +++ b/protocol/messenger_contact_verification.go @@ -24,6 +24,10 @@ import ( const 
minContactVerificationMessageLen = 1 const maxContactVerificationMessageLen = 280 +var ( + ErrContactNotMutual = errors.New("must be a mutual contact") +) + func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactID string, challenge string) (*MessengerResponse, error) { if len(challenge) < minContactVerificationMessageLen || len(challenge) > maxContactVerificationMessageLen { return nil, errors.New("invalid verification request challenge length") @@ -31,7 +35,7 @@ func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactI contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } verifRequest := &verification.Request{ @@ -138,7 +142,7 @@ func (m *Messenger) SendContactVerificationRequest(ctx context.Context, contactI func (m *Messenger) GetVerificationRequestSentTo(ctx context.Context, contactID string) (*verification.Request, error) { _, ok := m.allContacts.Load(contactID) if !ok { - return nil, errors.New("contact not found") + return nil, ErrContactNotFound } return m.verificationDatabase.GetLatestVerificationRequestSentTo(contactID) @@ -279,7 +283,7 @@ func (m *Messenger) AcceptContactVerificationRequest(ctx context.Context, id str contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } chat, ok := m.allChats.Load(contactID) @@ -394,7 +398,7 @@ func (m *Messenger) VerifiedTrusted(ctx context.Context, request *requests.Verif contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } err = m.setTrustStatusForContact(context.Background(), contactID, verification.TrustStatusTRUSTED) @@ -589,7 +593,7 @@ func (m *Messenger) DeclineContactVerificationRequest(ctx context.Context, id st contact, ok := m.allContacts.Load(verifRequest.From) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } contactID := verifRequest.From contact, err = m.setContactVerificationStatus(contactID, VerificationStatusVERIFIED) @@ -686,7 +690,7 @@ func (m *Messenger) DeclineContactVerificationRequest(ctx context.Context, id st func (m *Messenger) setContactVerificationStatus(contactID string, verificationStatus VerificationStatus) (*Contact, error) { contact, ok := m.allContacts.Load(contactID) if !ok || !contact.mutual() { - return nil, errors.New("must be a mutual contact") + return nil, ErrContactNotMutual } contact.VerificationStatus = verificationStatus @@ -714,6 +718,11 @@ func (m *Messenger) setContactVerificationStatus(contactID string, verificationS } func (m *Messenger) setTrustStatusForContact(ctx context.Context, contactID string, trustStatus verification.TrustStatus) error { + contact, ok := m.allContacts.Load(contactID) + if !ok { + return ErrContactNotFound + } + currentTime := m.getTimesource().GetCurrentTime() err := m.verificationDatabase.SetTrustStatus(contactID, trustStatus, currentTime) @@ -721,6 +730,9 @@ func (m *Messenger) setTrustStatusForContact(ctx context.Context, contactID stri return err } + contact.TrustStatus = trustStatus + m.allContacts.Store(contactID, contact) + return m.SyncTrustedUser(ctx, contactID, trustStatus, m.dispatchMessage) } @@ -784,7 +796,7 @@ func (m *Messenger) HandleRequestContactVerification(state *ReceivedMessageState contact := 
state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification request for a non added mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(id) @@ -875,7 +887,7 @@ func (m *Messenger) HandleAcceptContactVerification(state *ReceivedMessageState, contact := state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification response for a non mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(request.Id) @@ -964,7 +976,7 @@ func (m *Messenger) HandleDeclineContactVerification(state *ReceivedMessageState contact := state.CurrentMessageState.Contact if !contact.mutual() { m.logger.Debug("Received a verification decline for a non mutual contact", zap.String("contactID", contactID)) - return errors.New("must be a mutual contact") + return ErrContactNotMutual } persistedVR, err := m.verificationDatabase.GetVerificationRequest(request.Id) diff --git a/protocol/messenger_contact_verification_test.go b/protocol/messenger_contact_verification_test.go index ee46692bbeb..8ff0ff69613 100644 --- a/protocol/messenger_contact_verification_test.go +++ b/protocol/messenger_contact_verification_test.go @@ -769,3 +769,50 @@ func (s *MessengerVerificationRequests) newMessenger(shh types.Waku) *Messenger s.Require().NoError(err) return messenger } + +func (s *MessengerVerificationRequests) TestTrustStatus() { + theirMessenger := s.newMessenger(s.shh) + defer TearDownMessenger(&s.Suite, theirMessenger) + + s.mutualContact(theirMessenger) + + theirPk := types.EncodeHex(crypto.FromECDSAPub(&theirMessenger.identity.PublicKey)) + + // Test Mark as Trusted + err := s.m.MarkAsTrusted(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok := s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusTRUSTED, contactFromCache.TrustStatus) + trustStatusFromDb, err := s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusTRUSTED, trustStatusFromDb) + + // Test Remove Trust Mark + err = s.m.RemoveTrustStatus(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok = s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusUNKNOWN, contactFromCache.TrustStatus) + trustStatusFromDb, err = s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusUNKNOWN, trustStatusFromDb) + + // Test Mark as Untrustoworthy + err = s.m.MarkAsUntrustworthy(context.Background(), theirPk) + s.Require().NoError(err) + + contactFromCache, ok = s.m.allContacts.Load(theirPk) + s.Require().True(ok) + s.Require().Equal(verification.TrustStatusUNTRUSTWORTHY, contactFromCache.TrustStatus) + trustStatusFromDb, err = s.m.GetTrustStatus(theirPk) + s.Require().NoError(err) + s.Require().Equal(verification.TrustStatusUNTRUSTWORTHY, trustStatusFromDb) + + // Test calling with an unknown contact + err = s.m.MarkAsTrusted(context.Background(), "0x00000123") + s.Require().Error(err) + s.Require().Equal("contact not found", err.Error()) +}
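
Note on PATCH 7/7: the essence of the fix is that a trust-status change
must be written through to both the verification database and the
in-memory contact cache, not the database alone; the new TestTrustStatus
asserts that cache and database agree after every mutation. The
self-contained Go sketch below illustrates that write-through pattern.
It is a minimal illustration, not the status-go API: the Messenger,
store, and cache types here are hypothetical stand-ins.

package main

import (
	"errors"
	"fmt"
)

// TrustStatus mirrors the tri-state used in the patch.
type TrustStatus int

const (
	TrustStatusUNKNOWN TrustStatus = iota
	TrustStatusTRUSTED
	TrustStatusUNTRUSTWORTHY
)

var ErrContactNotFound = errors.New("contact not found")

// Contact is a trimmed stand-in for protocol.Contact.
type Contact struct {
	ID          string
	TrustStatus TrustStatus
}

// Messenger pairs a persistent store with an in-memory cache; the bug
// fixed above was that only the store was updated.
type Messenger struct {
	store map[string]TrustStatus // stand-in for verificationDatabase
	cache map[string]*Contact    // stand-in for allContacts
}

// setTrustStatus is a write-through update: look the contact up first,
// persist the new status, then refresh the cached contact so cache
// reads agree with the database.
func (m *Messenger) setTrustStatus(id string, status TrustStatus) error {
	contact, ok := m.cache[id]
	if !ok {
		return ErrContactNotFound // fail before touching the store
	}
	m.store[id] = status // persist first (SetTrustStatus in the patch)
	contact.TrustStatus = status
	m.cache[id] = contact // refresh the cache (the step the patch adds)
	return nil
}

func main() {
	m := &Messenger{
		store: map[string]TrustStatus{},
		cache: map[string]*Contact{"0xabc": {ID: "0xabc"}},
	}
	if err := m.setTrustStatus("0xabc", TrustStatusTRUSTED); err != nil {
		panic(err)
	}
	// Cache and store now agree, mirroring what TestTrustStatus asserts.
	fmt.Println(m.cache["0xabc"].TrustStatus == m.store["0xabc"]) // true
}

Persisting first and refreshing the cache only after the write succeeds
keeps a failed database write from leaving the cache ahead of the
database, which matches the ordering in the patched
setTrustStatusForContact.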