From 1d17dd8192333f5a9215e4cd10240c9279163f38 Mon Sep 17 00:00:00 2001 From: Artem Poltorzhitskiy <aopoltorzhicky@gmail.com> Date: Tue, 6 Jul 2021 15:49:35 +0300 Subject: [PATCH] Refactoring: better logging lib (#730) --- cmd/api/handlers/error.go | 2 +- cmd/api/main.go | 8 +- cmd/graphql/main.go | 9 +- cmd/indexer/indexer/boost.go | 50 +++---- cmd/indexer/indexer/protocol.go | 2 +- cmd/indexer/main.go | 9 +- cmd/metrics/bigmapdiff.go | 2 +- cmd/metrics/bulk.go | 6 +- cmd/metrics/contract.go | 2 +- cmd/metrics/main.go | 17 +-- cmd/metrics/operations.go | 2 +- cmd/metrics/time-based.go | 2 +- go.mod | 2 +- go.sum | 11 ++ internal/bcd/base/node.go | 2 +- internal/bcd/translator/converter.go | 4 +- internal/bcd/translator/converter_test.go | 4 +- .../classification/metrics/array_metric.go | 4 +- .../metrics/bin_mask_metrics.go | 4 +- internal/elastic/elastic.go | 2 +- internal/elastic/search.go | 2 +- internal/events/general.go | 2 +- internal/events/michelson_extended_storage.go | 4 +- internal/events/michelson_parameter.go | 4 +- internal/handlers/token_metadata.go | 2 +- internal/handlers/tzip.go | 2 +- internal/helpers/json.go | 4 +- internal/helpers/sentry.go | 4 +- internal/logger/logger.go | 106 ++++----------- internal/models/bigmapdiff/bigmapstate.go | 5 +- internal/models/bigmapdiff/model.go | 5 +- internal/models/contract/model.go | 5 +- internal/models/dapp/model.go | 5 +- internal/models/migration/model.go | 5 +- internal/models/operation/model.go | 5 +- internal/models/tokenbalance/model.go | 5 +- internal/models/tokenmetadata/model.go | 5 +- internal/models/transfer/model.go | 5 +- internal/models/tzip/model.go | 5 +- internal/mq/nats.go | 2 +- internal/mq/rabbit.go | 2 +- internal/noderpc/rpc.go | 4 +- internal/parsers/ledger/ledger.go | 2 +- internal/parsers/operations/migration.go | 2 +- internal/parsers/operations/origination.go | 2 +- internal/parsers/operations/test_common.go | 128 +++++++++--------- internal/parsers/operations/transaction.go | 2 +- internal/parsers/stacktrace/stacktrace.go | 2 +- internal/parsers/transfer/transfer.go | 2 +- internal/parsers/tzip/parser.go | 4 +- internal/parsers/tzip/tokens/metadata.go | 4 +- internal/parsers/tzip/tokens/parser.go | 6 +- internal/postgres/core/postgres.go | 2 +- internal/rollback/rollback.go | 2 +- internal/tzkt/request.go | 2 +- internal/tzkt/services.go | 2 +- scripts/api_tester/account.go | 6 +- scripts/api_tester/bigmapdiff.go | 8 +- scripts/api_tester/contract.go | 28 ++-- scripts/api_tester/general.go | 12 +- scripts/api_tester/main.go | 3 +- scripts/bcdctl/main.go | 23 ++-- scripts/bcdctl/rollback.go | 6 +- scripts/migration/main.go | 13 +- .../migrations/big_map_action_to_enum.go | 8 +- .../migration/migrations/big_russian_boss.go | 6 +- .../migration/migrations/create_transfers.go | 4 +- scripts/migration/migrations/create_tzip.go | 2 +- .../migration/migrations/enum_to_smallint.go | 6 +- .../migrations/extended_storage_events.go | 12 +- scripts/migration/migrations/fill_tzip.go | 6 +- scripts/migration/migrations/fix_id.go | 4 +- scripts/migration/migrations/get_aliases.go | 11 +- scripts/migration/migrations/nft_metadata.go | 4 +- .../migrations/operation_kind_to_enum.go | 8 +- .../migration/migrations/parameter_events.go | 12 +- scripts/migration/migrations/protocol.go | 38 +++--- scripts/migration/migrations/tags_to int.go | 2 +- .../migrations/token_balance_recalc.go | 10 +- .../migrations/token_metadata_unknown.go | 4 +- scripts/nginx/main.go | 18 ++- scripts/nginx/nginx.go | 4 +- scripts/nginx/sitemap.go 
| 2 +- 83 files changed, 357 insertions(+), 397 deletions(-) diff --git a/cmd/api/handlers/error.go b/cmd/api/handlers/error.go index 154eea165..ae4c5b2e0 100644 --- a/cmd/api/handlers/error.go +++ b/cmd/api/handlers/error.go @@ -24,7 +24,7 @@ func (ctx *Context) handleError(c *gin.Context, err error, code int) bool { if hub := sentrygin.GetHubFromContext(c); hub != nil { hub.CaptureMessage(err.Error()) } - logger.Error(err) + logger.Err(err) } } diff --git a/cmd/api/main.go b/cmd/api/main.go index f792213b6..861b6bbac 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -27,7 +27,7 @@ type app struct { func newApp() *app { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + panic(err) } docs.SwaggerInfo.Host = cfg.API.SwaggerHost @@ -40,7 +40,7 @@ func newApp() *app { ctx, err := handlers.NewContext(cfg) if err != nil { - logger.Error(err) + logger.Err(err) helpers.CatchErrorSentry(err) return nil } @@ -62,7 +62,7 @@ func (api *app) makeRouter() { if v, ok := binding.Validator.Engine().(*validator.Validate); ok { if err := validations.Register(v, api.Context.Config.API); err != nil { - logger.Fatal(err) + panic(err) } } @@ -218,7 +218,7 @@ func (api *app) Close() { func (api *app) Run() { if err := api.Router.Run(api.Context.Config.API.Bind); err != nil { - logger.Error(err) + logger.Err(err) helpers.CatchErrorSentry(err) return } diff --git a/cmd/graphql/main.go b/cmd/graphql/main.go index cb0dd5eda..ce7875949 100644 --- a/cmd/graphql/main.go +++ b/cmd/graphql/main.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/baking-bad/bcdhub/internal/config" - "github.com/baking-bad/bcdhub/internal/logger" "github.com/dosco/graphjin/core" "github.com/gin-gonic/gin" _ "github.com/jackc/pgx/v4/stdlib" @@ -75,16 +74,16 @@ func initUser() error { func main() { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + panic(err) } if err := initUser(); err != nil { - logger.Fatal(err) + panic(err) } db, err := sql.Open("pgx", cfg.GraphQL.DB) if err != nil { - logger.Fatal(err) + panic(err) } defer db.Close() @@ -98,7 +97,7 @@ func main() { Debug: true, }, db) if err != nil { - logger.Fatal(err) + panic(err) } ctx := apiContext{ diff --git a/cmd/indexer/indexer/boost.go b/cmd/indexer/indexer/boost.go index 3a8c7d46a..9233368c9 100644 --- a/cmd/indexer/indexer/boost.go +++ b/cmd/indexer/indexer/boost.go @@ -48,7 +48,7 @@ type BoostIndexer struct { } func (bi *BoostIndexer) fetchExternalProtocols() error { - logger.WithNetwork(bi.Network).Info("Fetching external protocols") + logger.Info().Str("network", bi.Network.String()).Msg("Fetching external protocols") existingProtocols, err := bi.Protocols.GetByNetworkWithSort(bi.Network, "start_level", "desc") if err != nil { return err @@ -94,7 +94,7 @@ func (bi *BoostIndexer) fetchExternalProtocols() error { } protocols = append(protocols, newProtocol) - logger.WithNetwork(bi.Network).Infof("Fetched %s", alias) + logger.Info().Str("network", bi.Network.String()).Msgf("Fetched %s", alias) } return bi.Storage.Save(protocols) @@ -102,7 +102,7 @@ func (bi *BoostIndexer) fetchExternalProtocols() error { // NewBoostIndexer - func NewBoostIndexer(cfg config.Config, network types.Network, opts ...BoostIndexerOption) (*BoostIndexer, error) { - logger.WithNetwork(network).Info("Creating indexer object...") + logger.Info().Str("network", network.String()).Msg("Creating indexer object...") rpcProvider, ok := cfg.RPC[network.String()] if !ok { @@ -184,7 +184,7 @@ func (bi *BoostIndexer) 
init(db *core.Postgres) error { return err } bi.state = currentState - logger.WithNetwork(bi.Network).Infof("Current indexer state: %d", currentState.Level) + logger.Info().Str("network", bi.Network.String()).Msgf("Current indexer state: %d", currentState.Level) currentProtocol, err := bi.Protocols.Get(bi.Network, "", currentState.Level) if err != nil { @@ -207,7 +207,7 @@ func (bi *BoostIndexer) init(db *core.Postgres) error { } bi.currentProtocol = currentProtocol - logger.WithNetwork(bi.Network).Infof("Current network protocol: %s", currentProtocol.Hash) + logger.Info().Str("network", bi.Network.String()).Msgf("Current network protocol: %s", currentProtocol.Hash) return nil } @@ -221,7 +221,7 @@ func (bi *BoostIndexer) Sync(wg *sync.WaitGroup) { // First tick if err := bi.process(); err != nil { - logger.Error(err) + logger.Err(err) helpers.CatchErrorSentry(err) } if bi.stopped { @@ -249,7 +249,7 @@ func (bi *BoostIndexer) Sync(wg *sync.WaitGroup) { } continue } - logger.Error(err) + logger.Err(err) helpers.CatchErrorSentry(err) } @@ -274,7 +274,7 @@ func (bi *BoostIndexer) setUpdateTicker(seconds int) { } else { duration = time.Duration(seconds) * time.Second } - logger.WithNetwork(bi.Network).Infof("Data will be updated every %.0f seconds", duration.Seconds()) + logger.Info().Str("network", bi.Network.String()).Msgf("Data will be updated every %.0f seconds", duration.Seconds()) bi.updateTicker = time.NewTicker(duration) } @@ -322,10 +322,10 @@ func (bi *BoostIndexer) handleBlock(head noderpc.Header) error { result := parsers.NewResult() err := bi.StorageDB.DB.Transaction( func(tx *gorm.DB) error { - logger.WithNetwork(bi.Network).Infof("indexing %d block", head.Level) + logger.Info().Str("network", bi.Network.String()).Msgf("indexing %d block", head.Level) if head.Protocol != bi.currentProtocol.Hash { - logger.WithNetwork(bi.Network).Infof("New protocol detected: %s -> %s", bi.currentProtocol.Hash, head.Protocol) + logger.Info().Str("network", bi.Network.String()).Msgf("New protocol detected: %s -> %s", bi.currentProtocol.Hash, head.Protocol) if err := bi.migrate(head, tx); err != nil { return err @@ -358,7 +358,7 @@ func (bi *BoostIndexer) handleBlock(head noderpc.Header) error { // Rollback - func (bi *BoostIndexer) Rollback() error { - logger.WithNetwork(bi.Network).Warningf("Rollback from %d", bi.state.Level) + logger.Warning().Str("network", bi.Network.String()).Msgf("Rollback from %d", bi.state.Level) lastLevel, err := bi.getLastRollbackBlock() if err != nil { @@ -377,8 +377,8 @@ func (bi *BoostIndexer) Rollback() error { return err } bi.state = newState - logger.WithNetwork(bi.Network).Infof("New indexer state: %d", bi.state.Level) - logger.WithNetwork(bi.Network).Info("Rollback finished") + logger.Info().Str("network", bi.Network.String()).Msgf("New indexer state: %d", bi.state.Level) + logger.Info().Str("network", bi.Network.String()).Msg("Rollback finished") return nil } @@ -398,7 +398,7 @@ func (bi *BoostIndexer) getLastRollbackBlock() (int64, error) { } if block.Predecessor == headAtLevel.Predecessor { - logger.WithNetwork(bi.Network).Warnf("Found equal predecessors at level: %d", block.Level) + logger.Warning().Str("network", bi.Network.String()).Msgf("Found equal predecessors at level: %d", block.Level) end = true lastLevel = block.Level - 1 } @@ -438,8 +438,8 @@ func (bi *BoostIndexer) process() error { return errors.Errorf("Invalid chain_id: %s (state) != %s (head)", bi.state.ChainID, head.ChainID) } - logger.WithNetwork(bi.Network).Infof("Current node state: %d", 
head.Level) - logger.WithNetwork(bi.Network).Infof("Current indexer state: %d", bi.state.Level) + logger.Info().Str("network", bi.Network.String()).Msgf("Current node state: %d", head.Level) + logger.Info().Str("network", bi.Network.String()).Msgf("Current indexer state: %d", bi.state.Level) if head.Level > bi.state.Level { levels := make([]int64, 0) @@ -454,7 +454,7 @@ func (bi *BoostIndexer) process() error { } } - logger.WithNetwork(bi.Network).Infof("Found %d new levels", len(levels)) + logger.Info().Str("network", bi.Network.String()).Msgf("Found %d new levels", len(levels)) if err := bi.Index(levels); err != nil { if errors.Is(err, errBcdQuit) { @@ -474,7 +474,7 @@ func (bi *BoostIndexer) process() error { if bi.boost { bi.boost = false } - logger.WithNetwork(bi.Network).Info("Synced") + logger.Info().Str("network", bi.Network.String()).Msg("Synced") return nil } else if head.Level < bi.state.Level { if err := bi.Rollback(); err != nil { @@ -561,7 +561,7 @@ func (bi *BoostIndexer) getDataFromBlock(head noderpc.Header) (*parsers.Result, func (bi *BoostIndexer) migrate(head noderpc.Header, tx *gorm.DB) error { if bi.currentProtocol.EndLevel == 0 && head.Level > 1 { - logger.WithNetwork(bi.Network).Infof("Finalizing the previous protocol: %s", bi.currentProtocol.Alias) + logger.Info().Str("network", bi.Network.String()).Msgf("Finalizing the previous protocol: %s", bi.currentProtocol.Alias) bi.currentProtocol.EndLevel = head.Level - 1 if err := bi.currentProtocol.Save(bi.StorageDB.DB); err != nil { return err @@ -570,7 +570,7 @@ func (bi *BoostIndexer) migrate(head noderpc.Header, tx *gorm.DB) error { newProtocol, err := bi.Protocols.Get(bi.Network, head.Protocol, head.Level) if err != nil { - logger.Warning("%s", err) + logger.Warning().Str("network", bi.Network.String()).Msgf("%s", err) newProtocol, err = createProtocol(bi.rpc, bi.Network, head.Protocol, head.Level) if err != nil { return err @@ -593,7 +593,7 @@ func (bi *BoostIndexer) migrate(head noderpc.Header, tx *gorm.DB) error { return err } } else { - logger.WithNetwork(bi.Network).Infof("Same symlink %s for %s / %s", + logger.Info().Str("network", bi.Network.String()).Msgf("Same symlink %s for %s / %s", newProtocol.SymLink, bi.currentProtocol.Alias, newProtocol.Alias) } } @@ -601,24 +601,24 @@ func (bi *BoostIndexer) migrate(head noderpc.Header, tx *gorm.DB) error { bi.currentProtocol = newProtocol bi.setUpdateTicker(0) - logger.WithNetwork(bi.Network).Infof("Migration to %s is completed", bi.currentProtocol.Alias) + logger.Info().Str("network", bi.Network.String()).Msgf("Migration to %s is completed", bi.currentProtocol.Alias) return nil } func (bi *BoostIndexer) standartMigration(newProtocol protocol.Protocol, head noderpc.Header, tx *gorm.DB) error { - logger.WithNetwork(bi.Network).Info("Try to find migrations...") + logger.Info().Str("network", bi.Network.String()).Msg("Try to find migrations...") contracts, err := bi.Contracts.GetMany(map[string]interface{}{ "network": bi.Network, }) if err != nil { return err } - logger.WithNetwork(bi.Network).Infof("Now %d contracts are indexed", len(contracts)) + logger.Info().Str("network", bi.Network.String()).Msgf("Now %d contracts are indexed", len(contracts)) p := migrations.NewMigrationParser(bi.Storage, bi.BigMapDiffs, bi.Config.SharePath) for i := range contracts { - logger.WithNetwork(bi.Network).Infof("Migrate %s...", contracts[i].Address) + logger.Info().Str("network", bi.Network.String()).Msgf("Migrate %s...", contracts[i].Address) script, err := 
bi.rpc.GetScriptJSON(contracts[i].Address, newProtocol.StartLevel) if err != nil { return err diff --git a/cmd/indexer/indexer/protocol.go b/cmd/indexer/indexer/protocol.go index 7e74744b2..8c6b4939e 100644 --- a/cmd/indexer/indexer/protocol.go +++ b/cmd/indexer/indexer/protocol.go @@ -9,7 +9,7 @@ import ( ) func createProtocol(rpc noderpc.INode, network types.Network, hash string, level int64) (protocol protocol.Protocol, err error) { - logger.WithNetwork(network).Infof("Creating new protocol %s starting at %d", hash, level) + logger.Info().Str("network", network.String()).Msgf("Creating new protocol %s starting at %d", hash, level) protocol.SymLink, err = bcd.GetProtoSymLink(hash) if err != nil { return diff --git a/cmd/indexer/main.go b/cmd/indexer/main.go index 07e96c696..1638b2151 100644 --- a/cmd/indexer/main.go +++ b/cmd/indexer/main.go @@ -16,7 +16,8 @@ import ( func main() { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } if cfg.Indexer.SentryEnabled { @@ -27,7 +28,7 @@ func main() { indexers, err := indexer.CreateIndexers(cfg) if err != nil { - logger.Error(err) + logger.Err(err) helpers.CatchErrorSentry(err) return } @@ -36,7 +37,7 @@ func main() { if countCPU > len(indexers)+1 { countCPU = len(indexers) + 1 } - logger.Warning("Indexer started on %d CPU cores", countCPU) + logger.Warning().Msgf("Indexer started on %d CPU cores", countCPU) runtime.GOMAXPROCS(countCPU) sigChan := make(chan os.Signal, 1) @@ -54,5 +55,5 @@ func main() { go indexers[i].Stop() } wg.Wait() - logger.Info("Stopped") + logger.Info().Msg("Stopped") } diff --git a/cmd/metrics/bigmapdiff.go b/cmd/metrics/bigmapdiff.go index 5d8a20fc0..dcd1bc171 100644 --- a/cmd/metrics/bigmapdiff.go +++ b/cmd/metrics/bigmapdiff.go @@ -30,7 +30,7 @@ func getBigMapDiff(ids []int64) error { items = append(items, res...) } - logger.WithField("models", len(items)).Infof("%2d big map diff processed", len(bmd)) + logger.Info().Int("models", len(items)).Msgf("%2d big map diff processed", len(bmd)) if len(items) > 0 { if err := ctx.Storage.Save(items); err != nil { diff --git a/cmd/metrics/bulk.go b/cmd/metrics/bulk.go index ab2ab0ecf..2e2ce45ff 100644 --- a/cmd/metrics/bulk.go +++ b/cmd/metrics/bulk.go @@ -59,18 +59,18 @@ func (bm *BulkManager) process(force bool) bool { for i := range bm.queue { id, err := parseID(bm.queue[i].Body) if err != nil { - logger.Error(err) + logger.Err(err) continue } ids[i] = id } if err := bm.handler(ids); err != nil { - logger.Error(err) + logger.Err(err) return false } for i := range bm.queue { if err := bm.queue[i].Ack(false); err != nil { - logger.Errorf("Error acknowledging message: %s", err) + logger.Error().Msgf("Error acknowledging message: %s", err) return false } } diff --git a/cmd/metrics/contract.go b/cmd/metrics/contract.go index 2d79cda72..1b24a66cb 100644 --- a/cmd/metrics/contract.go +++ b/cmd/metrics/contract.go @@ -25,7 +25,7 @@ func getContract(ids []int64) error { updates = append(updates, res...) 
} - logger.Info("%2d contracts are processed", len(contracts)) + logger.Info().Msgf("%2d contracts are processed", len(contracts)) if err := saveSearchModels(ctx.Searcher, updates); err != nil { return err diff --git a/cmd/metrics/main.go b/cmd/metrics/main.go index b82fe3235..861040072 100644 --- a/cmd/metrics/main.go +++ b/cmd/metrics/main.go @@ -41,14 +41,14 @@ func listenChannel(messageQueue mq.IMessageReceiver, queue string, closeChan cha msgs, err := messageQueue.Consume(queue) if err != nil { - logger.Error(err) + logger.Err(err) return } ticker := time.NewTicker(time.Second * time.Duration(15)) defer ticker.Stop() - logger.Info("Connected to %s queue", queue) + logger.Info().Msgf("Connected to %s queue", queue) for { select { case <-ticker.C: @@ -56,7 +56,7 @@ func listenChannel(messageQueue mq.IMessageReceiver, queue string, closeChan cha manager.Exec() } case <-closeChan: - logger.Info("Stopped %s queue", queue) + logger.Info().Msgf("Stopped %s queue", queue) return case msg := <-msgs: if manager, ok := managers[msg.RoutingKey]; ok { @@ -65,22 +65,22 @@ func listenChannel(messageQueue mq.IMessageReceiver, queue string, closeChan cha } if msg.RoutingKey == "" { - logger.Warning("[%s] Rabbit MQ server stopped! Metrics service need to be restarted. Closing connection...", queue) + logger.Warning().Msgf("[%s] Rabbit MQ server stopped! Metrics service need to be restarted. Closing connection...", queue) return } - logger.Errorf("Unknown data routing key %s", msg.RoutingKey) + logger.Error().Msgf("Unknown data routing key %s", msg.RoutingKey) helpers.LocalCatchErrorSentry(localSentry, errors.Errorf("[listenChannel] %s", err.Error())) } } } func main() { - logger.Warning("Metrics started on 5 CPU cores") + logger.Warning().Msg("Metrics started on 5 CPU cores") runtime.GOMAXPROCS(5) cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) } if cfg.Metrics.SentryEnabled { @@ -106,7 +106,8 @@ func main() { } if err := ctx.Searcher.CreateIndexes(); err != nil { - logger.Fatal(err) + logger.Err(err) + return } var wg sync.WaitGroup diff --git a/cmd/metrics/operations.go b/cmd/metrics/operations.go index 080477b60..e050f1aca 100644 --- a/cmd/metrics/operations.go +++ b/cmd/metrics/operations.go @@ -17,7 +17,7 @@ func getOperation(ids []int64) error { for i := range operations { updated = append(updated, &operations[i]) } - logger.Info("%2d operations are processed", len(operations)) + logger.Info().Msgf("%2d operations are processed", len(operations)) return saveSearchModels(ctx.Searcher, updated) } diff --git a/cmd/metrics/time-based.go b/cmd/metrics/time-based.go index fab427d0a..0540e5806 100644 --- a/cmd/metrics/time-based.go +++ b/cmd/metrics/time-based.go @@ -21,7 +21,7 @@ func timeBasedTask(period time.Duration, handler func() error, closeChan chan st return case <-ticker.C: if err := handler(); err != nil { - logger.Error(err) + logger.Err(err) } } } diff --git a/go.mod b/go.mod index 28b53590f..1c74f438d 100644 --- a/go.mod +++ b/go.mod @@ -29,10 +29,10 @@ require ( github.com/nats-io/nats-server/v2 v2.1.9 // indirect github.com/nats-io/nats.go v1.10.0 github.com/pkg/errors v0.9.1 + github.com/rs/zerolog v1.15.0 github.com/schollz/progressbar/v3 v3.1.1 github.com/sergi/go-diff v1.1.0 github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc - github.com/sirupsen/logrus v1.7.0 github.com/streadway/amqp v1.0.0 github.com/stretchr/testify v1.7.0 
github.com/swaggo/swag v1.7.0 diff --git a/go.sum b/go.sum index 7add68d82..a3383b499 100644 --- a/go.sum +++ b/go.sum @@ -54,17 +54,20 @@ github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGib github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v49.0.0+incompatible h1:rvYYNgKNBwoxUaBFmd/+TpW3qrd805EHBBvUp5FmFso= github.com/Azure/azure-sdk-for-go v49.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-service-bus-go v0.10.7/go.mod h1:o5z/3lDG1iT/T/G7vgIwIqVDTx9Qa2wndf5OdzSzpF8= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.7/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs= github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.12 h1:gI8ytXbxMfI+IVbI9mP2JGCTXIuhHLgRlvQ9X4PsnHE= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= @@ -72,22 +75,28 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.6 h1:d3pSDwvBWBLqdA91u+keH1zs1cCEzrQdHKY6iqbQNkE= github.com/Azure/go-autorest/autorest/adal v0.9.6/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= 
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss= github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -317,6 +326,7 @@ github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -1074,6 +1084,7 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2 
h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= diff --git a/internal/bcd/base/node.go b/internal/bcd/base/node.go index b2a5203fe..435c69062 100644 --- a/internal/bcd/base/node.go +++ b/internal/bcd/base/node.go @@ -164,7 +164,7 @@ func (node *Node) IsLambda() bool { } b, err := hex.DecodeString(input[22:24]) if err != nil { - logger.Error(err) + logger.Err(err) return false } if len(b) != 1 { diff --git a/internal/bcd/translator/converter.go b/internal/bcd/translator/converter.go index 5c76a4115..cb75321b7 100644 --- a/internal/bcd/translator/converter.go +++ b/internal/bcd/translator/converter.go @@ -69,11 +69,11 @@ func (c Converter) FromString(input string) (string, error) { func (c Converter) trace() { if c.debug { c.parser.TracerEnter = func(name string, s string, v *peg.Values, d peg.Any, p int) { - logger.Info("Enter: %s %d %d", name, p, len(s)) + logger.Info().Msgf("Enter: %s %d %d", name, p, len(s)) } c.parser.TracerLeave = func(name string, s string, v *peg.Values, d peg.Any, p int, l int) { if l != -1 { - logger.Info("Leave: %s %d %d", name, len(s), l+p) + logger.Info().Msgf("Leave: %s %d %d", name, len(s), l+p) } } } diff --git a/internal/bcd/translator/converter_test.go b/internal/bcd/translator/converter_test.go index 7ae03a3a9..de2bae196 100644 --- a/internal/bcd/translator/converter_test.go +++ b/internal/bcd/translator/converter_test.go @@ -5,14 +5,14 @@ import ( "io/ioutil" "testing" - "github.com/baking-bad/bcdhub/internal/logger" "github.com/stretchr/testify/assert" ) func TestConverter_FromFile(t *testing.T) { files, err := ioutil.ReadDir("./tests/") if err != nil { - logger.Fatal(err) + t.Errorf("ioutil.ReadDir(./tests/) error = %v", err) + return } c, err := NewConverter() diff --git a/internal/classification/metrics/array_metric.go b/internal/classification/metrics/array_metric.go index 94d3982a7..9065b19ee 100644 --- a/internal/classification/metrics/array_metric.go +++ b/internal/classification/metrics/array_metric.go @@ -29,13 +29,13 @@ func (m *Array) Compute(a, b contract.Contract) Feature { aArr, err := m.getContractFieldArray(a) if err != nil { - logger.Error(err) + logger.Err(err) return f } bArr, err := m.getContractFieldArray(b) if err != nil { - logger.Error(err) + logger.Err(err) return f } diff --git a/internal/classification/metrics/bin_mask_metrics.go b/internal/classification/metrics/bin_mask_metrics.go index 3c27803aa..9155dc48a 100644 --- a/internal/classification/metrics/bin_mask_metrics.go +++ b/internal/classification/metrics/bin_mask_metrics.go @@ -30,13 +30,13 @@ func (m *BinMask) Compute(a, b contract.Contract) Feature { mask1, err := m.getContractFieldBinMask(a) if err != nil { - logger.Error(err) + logger.Err(err) return f } mask2, err := m.getContractFieldBinMask(b) if err != nil { - logger.Error(err) + logger.Err(err) return f } diff --git a/internal/elastic/elastic.go b/internal/elastic/elastic.go index e8de1274c..0aa8a4873 100644 --- a/internal/elastic/elastic.go +++ b/internal/elastic/elastic.go @@ -54,7 +54,7 @@ func WaitNew(addresses []string, timeout int) *Elastic { for es == nil { es, err = New(addresses) if err != nil { - logger.Warning("Waiting elastic up %d seconds...", timeout) + logger.Warning().Msgf("Waiting elastic up %d seconds...", timeout) time.Sleep(time.Second * time.Duration(timeout)) } } diff --git a/internal/elastic/search.go b/internal/elastic/search.go index ca235fe19..1284b394f 100644 --- 
a/internal/elastic/search.go +++ b/internal/elastic/search.go @@ -129,7 +129,7 @@ func (e *Elastic) ByText(text string, offset int64, fields []string, filters map items, err = parseSearchResponse(response) } if err != nil { - logger.Error(err) + logger.Err(err) return search.Result{}, nil } diff --git a/internal/events/general.go b/internal/events/general.go index 0775f69b0..75d6da7c9 100644 --- a/internal/events/general.go +++ b/internal/events/general.go @@ -57,7 +57,7 @@ func (sections Sections) GetCode() ([]byte, error) { func Execute(rpc noderpc.INode, event Event, ctx Context) ([]tokenbalance.TokenBalance, error) { parameter := event.Normalize(ctx.Parameters) if parameter == nil { - logger.Warning("%s event failed", ctx.Network) + logger.Warning().Msgf("%s event failed", ctx.Network) return nil, nil } storage := []byte(`[]`) diff --git a/internal/events/michelson_extended_storage.go b/internal/events/michelson_extended_storage.go index c2e7eced3..e7878480a 100644 --- a/internal/events/michelson_extended_storage.go +++ b/internal/events/michelson_extended_storage.go @@ -59,13 +59,13 @@ func (mes *MichelsonExtendedStorage) Normalize(value *ast.TypedAst) []byte { } if err := storage.Enrich(value, mes.bmd, true, false); err != nil { - logger.Warning("MichelsonExtendedStorage.Normalize %s", err.Error()) + logger.Warning().Msgf("MichelsonExtendedStorage.Normalize %s", err.Error()) return nil } b, err := value.ToParameters("") if err != nil { - logger.Warning("MichelsonExtendedStorage.Normalize %s", err.Error()) + logger.Warning().Msgf("MichelsonExtendedStorage.Normalize %s", err.Error()) return nil } return b diff --git a/internal/events/michelson_parameter.go b/internal/events/michelson_parameter.go index 8593039bb..dd79e3eb6 100644 --- a/internal/events/michelson_parameter.go +++ b/internal/events/michelson_parameter.go @@ -55,12 +55,12 @@ func (event *MichelsonParameter) Normalize(value *ast.TypedAst) []byte { result, _ := value.UnwrapAndGetEntrypointName() if result == nil { - logger.Warning("MichelsonParameter.Normalize: can't unwrap") + logger.Warning().Msgf("MichelsonParameter.Normalize: can't unwrap") return nil } b, err := result.ToParameters() if err != nil { - logger.Warning("MichelsonParameter.Normalize %s", err.Error()) + logger.Warning().Msgf("MichelsonParameter.Normalize %s", err.Error()) return nil } return b diff --git a/internal/handlers/token_metadata.go b/internal/handlers/token_metadata.go index 1cd499201..e5dec39fb 100644 --- a/internal/handlers/token_metadata.go +++ b/internal/handlers/token_metadata.go @@ -41,7 +41,7 @@ func (t *TokenMetadata) Do(bmd *domains.BigMapDiff, storage *ast.TypedAst) (bool tokenMetadata, err := tokenParser.ParseBigMapDiff(bmd, storage) if err != nil { if !errors.Is(err, tokens.ErrNoMetadataKeyInStorage) { - logger.With(bmd).Error(err) + logger.Err(err) } return false, nil, nil } diff --git a/internal/handlers/tzip.go b/internal/handlers/tzip.go index 2c1db1cbc..9d1906f05 100644 --- a/internal/handlers/tzip.go +++ b/internal/handlers/tzip.go @@ -53,7 +53,7 @@ func (t *TZIP) handle(bmd *domains.BigMapDiff) ([]models.Model, error) { BigMapDiff: *bmd.BigMapDiff, }) if err != nil { - logger.With(bmd).Warn(err) + logger.Warning().Fields(bmd.LogFields()).Err(err).Msg("") return nil, nil } if model == nil { diff --git a/internal/helpers/json.go b/internal/helpers/json.go index 9679f67a6..ae1b35d23 100644 --- a/internal/helpers/json.go +++ b/internal/helpers/json.go @@ -14,11 +14,11 @@ func AreEqualJSON(s1, s2 string) (bool, error) { var err error 
err = json.Unmarshal([]byte(s1), &o1) if err != nil { - return false, fmt.Errorf("Error mashalling string 1 :: %s", err.Error()) + return false, fmt.Errorf("error mashalling string 1 :: %s", err.Error()) } err = json.Unmarshal([]byte(s2), &o2) if err != nil { - return false, fmt.Errorf("Error mashalling string 2 :: %s", err.Error()) + return false, fmt.Errorf("error mashalling string 2 :: %s", err.Error()) } return reflect.DeepEqual(o1, o2), nil diff --git a/internal/helpers/sentry.go b/internal/helpers/sentry.go index 302d59622..48dfdab84 100644 --- a/internal/helpers/sentry.go +++ b/internal/helpers/sentry.go @@ -19,12 +19,12 @@ func InitSentry(debug bool, environment, dsn string) { AttachStacktrace: true, BeforeSend: beforeSend, }); err != nil { - logger.Info("Sentry initialization failed: %v\n", err) + logger.Info().Msgf("Sentry initialization failed: %v\n", err) } } func beforeSend(event *sentry.Event, hint *sentry.EventHint) *sentry.Event { - logger.Info("[Sentry message] %s", event.Message) + logger.Info().Msgf("[Sentry message] %s", event.Message) return event } diff --git a/internal/logger/logger.go b/internal/logger/logger.go index aae8a86ca..38b33038c 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -3,102 +3,61 @@ package logger import ( "bufio" "bytes" - "encoding/json" "fmt" "os" "runtime" "strings" - "github.com/baking-bad/bcdhub/internal/models/types" "github.com/fatih/color" - "github.com/sirupsen/logrus" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) // Loggable - type Loggable interface { - LogFields() logrus.Fields + LogFields() map[string]interface{} } var logger = newBCDLogger() -// BCDLogger - -type BCDLogger struct { - *logrus.Logger -} - -func newBCDLogger() *BCDLogger { - l := &BCDLogger{ - Logger: logrus.New(), - } +func newBCDLogger() zerolog.Logger { + consoleWriter := zerolog.ConsoleWriter{Out: os.Stderr} - formatter := new(logrus.TextFormatter) switch os.Getenv("BCD_ENV") { case "development": - formatter.FullTimestamp = true - formatter.TimestampFormat = "2006-01-02 15:04:05" + consoleWriter.TimeFormat = "2006-01-02 15:04:05" default: - formatter.DisableTimestamp = true + consoleWriter.FormatTimestamp = func(i interface{}) string { + return "" + } } - - l.SetFormatter(formatter) - l.SetOutput(os.Stdout) - l.SetLevel(logrus.InfoLevel) - return l -} - -// Error - -func Error(err error) { - logger.Error(err) -} - -// Errorf - -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + zerolog.SetGlobalLevel(zerolog.InfoLevel) + return log.Output(consoleWriter) } // Info - -func Info(format string, args ...interface{}) { - logger.Infof(format, args...) +func Info() *zerolog.Event { + return logger.Info() } // Warning - -func Warning(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -// Fatal - -func Fatal(err error) { - logger.Fatal(err) - os.Exit(1) +func Warning() *zerolog.Event { + return logger.Warn() } -// Log - -func Log(text string) { - logger.Print(text) -} - -// Logf - -func Logf(format string, args ...interface{}) { - logger.Printf(format, args...) 
+// Error - +func Error() *zerolog.Event { + return logger.Error() } -// JSON - pretty json log -func JSON(data string) { - var pretty bytes.Buffer - if err := json.Indent(&pretty, []byte(data), "", " "); err != nil { - Error(err) - } else { - Info(pretty.String()) - } +// Err - +func Err(err error) { + logger.Error().Err(err).Msg("") } -// InterfaceToJSON - pretty json log -func InterfaceToJSON(data interface{}) { - if result, err := json.MarshalIndent(data, "", " "); err != nil { - Error(err) - } else { - Info(string(result)) - } +// Fatal - +func Fatal() *zerolog.Event { + return logger.Fatal() } // Debug - @@ -180,20 +139,3 @@ func Question(format string, v ...interface{}) { blue := color.New(color.FgMagenta).SprintFunc() fmt.Printf("[%s] %s", blue("?"), fmt.Sprintf(format, v...)) } - -// With - -func With(entry Loggable) *logrus.Entry { - return logger.WithFields(entry.LogFields()) -} - -// WithNetwork - -func WithNetwork(network types.Network) *logrus.Entry { - return logger.WithField("network", network.String()) -} - -// WithField - -func WithField(name string, value interface{}) *logrus.Entry { - return logger.WithFields(logrus.Fields{ - name: value, - }) -} diff --git a/internal/models/bigmapdiff/bigmapstate.go b/internal/models/bigmapdiff/bigmapstate.go index c38e51028..361a193b1 100644 --- a/internal/models/bigmapdiff/bigmapstate.go +++ b/internal/models/bigmapdiff/bigmapstate.go @@ -4,7 +4,6 @@ import ( "time" "github.com/baking-bad/bcdhub/internal/models/types" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -87,8 +86,8 @@ func (b *BigMapState) MarshalToQueue() ([]byte, error) { } // LogFields - -func (b *BigMapState) LogFields() logrus.Fields { - return logrus.Fields{ +func (b *BigMapState) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": b.Network.String(), "ptr": b.Ptr, "key_hash": b.KeyHash, diff --git a/internal/models/bigmapdiff/model.go b/internal/models/bigmapdiff/model.go index 806b1072c..d42d3cfea 100644 --- a/internal/models/bigmapdiff/model.go +++ b/internal/models/bigmapdiff/model.go @@ -6,7 +6,6 @@ import ( "github.com/baking-bad/bcdhub/internal/models/types" "github.com/lib/pq" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -57,8 +56,8 @@ func (b *BigMapDiff) MarshalToQueue() ([]byte, error) { } // LogFields - -func (b *BigMapDiff) LogFields() logrus.Fields { - return logrus.Fields{ +func (b *BigMapDiff) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": b.Network.String(), "contract": b.Contract, "ptr": b.Ptr, diff --git a/internal/models/contract/model.go b/internal/models/contract/model.go index 087849a29..5601e32c8 100644 --- a/internal/models/contract/model.go +++ b/internal/models/contract/model.go @@ -6,7 +6,6 @@ import ( "github.com/baking-bad/bcdhub/internal/models/types" "github.com/lib/pq" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -73,8 +72,8 @@ func (c *Contract) GetQueues() []string { } // LogFields - -func (c *Contract) LogFields() logrus.Fields { - return logrus.Fields{ +func (c *Contract) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": c.Network.String(), "address": c.Address, "block": c.Level, diff --git a/internal/models/dapp/model.go b/internal/models/dapp/model.go index aad46aac8..8c292ef88 100644 --- a/internal/models/dapp/model.go +++ b/internal/models/dapp/model.go @@ -5,7 +5,6 @@ import ( 
"encoding/json" "github.com/lib/pq" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -60,8 +59,8 @@ func (d *DApp) MarshalToQueue() ([]byte, error) { } // LogFields - -func (d *DApp) LogFields() logrus.Fields { - return logrus.Fields{ +func (d *DApp) LogFields() map[string]interface{} { + return map[string]interface{}{ "name": d.Name, } } diff --git a/internal/models/migration/model.go b/internal/models/migration/model.go index bb5e29532..30172355b 100644 --- a/internal/models/migration/model.go +++ b/internal/models/migration/model.go @@ -5,7 +5,6 @@ import ( "time" "github.com/baking-bad/bcdhub/internal/models/types" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -51,8 +50,8 @@ func (m *Migration) MarshalToQueue() ([]byte, error) { } // LogFields - -func (m *Migration) LogFields() logrus.Fields { - return logrus.Fields{ +func (m *Migration) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": m.Network.String(), "address": m.Address, "block": m.Level, diff --git a/internal/models/operation/model.go b/internal/models/operation/model.go index 471a61f5c..209440548 100644 --- a/internal/models/operation/model.go +++ b/internal/models/operation/model.go @@ -13,7 +13,6 @@ import ( "github.com/baking-bad/bcdhub/internal/models/types" "github.com/lib/pq" "github.com/shopspring/decimal" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -97,8 +96,8 @@ func (o *Operation) MarshalToQueue() ([]byte, error) { } // LogFields - -func (o *Operation) LogFields() logrus.Fields { - return logrus.Fields{ +func (o *Operation) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": o.Network.String(), "hash": o.Hash, "block": o.Level, diff --git a/internal/models/tokenbalance/model.go b/internal/models/tokenbalance/model.go index 78242f519..c38469f1b 100644 --- a/internal/models/tokenbalance/model.go +++ b/internal/models/tokenbalance/model.go @@ -3,7 +3,6 @@ package tokenbalance import ( "github.com/baking-bad/bcdhub/internal/models/types" "github.com/shopspring/decimal" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -66,8 +65,8 @@ func (tb *TokenBalance) MarshalToQueue() ([]byte, error) { } // LogFields - -func (tb *TokenBalance) LogFields() logrus.Fields { - return logrus.Fields{ +func (tb *TokenBalance) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": tb.Network.String(), "address": tb.Address, "contract": tb.Contract, diff --git a/internal/models/tokenmetadata/model.go b/internal/models/tokenmetadata/model.go index 2006edcd2..f674e8209 100644 --- a/internal/models/tokenmetadata/model.go +++ b/internal/models/tokenmetadata/model.go @@ -5,7 +5,6 @@ import ( "github.com/baking-bad/bcdhub/internal/models/types" "github.com/lib/pq" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -93,8 +92,8 @@ func (t *TokenMetadata) MarshalToQueue() ([]byte, error) { } // LogFields - -func (t *TokenMetadata) LogFields() logrus.Fields { - return logrus.Fields{ +func (t *TokenMetadata) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": t.Network.String(), "contract": t.Contract, "token_id": t.TokenID, diff --git a/internal/models/transfer/model.go b/internal/models/transfer/model.go index 3fc6a1603..4670af97f 100644 --- a/internal/models/transfer/model.go +++ 
b/internal/models/transfer/model.go @@ -7,7 +7,6 @@ import ( "github.com/baking-bad/bcdhub/internal/models/tokenbalance" "github.com/baking-bad/bcdhub/internal/models/types" "github.com/shopspring/decimal" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -58,8 +57,8 @@ func (t *Transfer) MarshalToQueue() ([]byte, error) { } // LogFields - -func (t *Transfer) LogFields() logrus.Fields { - return logrus.Fields{ +func (t *Transfer) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": t.Network.String(), "contract": t.Contract, "block": t.Level, diff --git a/internal/models/tzip/model.go b/internal/models/tzip/model.go index 2793a6d10..f086fa219 100644 --- a/internal/models/tzip/model.go +++ b/internal/models/tzip/model.go @@ -4,7 +4,6 @@ import ( "time" "github.com/baking-bad/bcdhub/internal/models/types" - "github.com/sirupsen/logrus" "gorm.io/gorm" "gorm.io/gorm/clause" ) @@ -54,8 +53,8 @@ func (t *TZIP) MarshalToQueue() ([]byte, error) { } // LogFields - -func (t *TZIP) LogFields() logrus.Fields { - return logrus.Fields{ +func (t *TZIP) LogFields() map[string]interface{} { + return map[string]interface{}{ "network": t.Network, "address": t.Address, "level": t.Level, diff --git a/internal/mq/nats.go b/internal/mq/nats.go index c4e3d756f..3d33d4bdd 100644 --- a/internal/mq/nats.go +++ b/internal/mq/nats.go @@ -71,7 +71,7 @@ func WaitNewNats(service, url string, timeout int, queues ...Queue) *Nats { for n == nil { n, err = NewNats(service, url, queues...) if err != nil { - logger.Warning("Waiting mq up %d seconds...", timeout) + logger.Warning().Msgf("Waiting mq up %d seconds...", timeout) time.Sleep(time.Second * time.Duration(timeout)) } } diff --git a/internal/mq/rabbit.go b/internal/mq/rabbit.go index 991d6c77d..ac38e8f14 100644 --- a/internal/mq/rabbit.go +++ b/internal/mq/rabbit.go @@ -107,7 +107,7 @@ func WaitNewRabbit(connection, service string, needPublisher bool, timeout int, for qm == nil { qm, err = NewQueueManager(connection, service, needPublisher, queues...) if err != nil { - logger.Warning("Waiting mq up %d seconds...", timeout) + logger.Warning().Msgf("Waiting mq up %d seconds...", timeout) time.Sleep(time.Second * time.Duration(timeout)) } } diff --git a/internal/noderpc/rpc.go b/internal/noderpc/rpc.go index b257dd4fc..eeda6b410 100644 --- a/internal/noderpc/rpc.go +++ b/internal/noderpc/rpc.go @@ -60,7 +60,7 @@ func NewWaitNodeRPC(baseURL string, opts ...NodeOption) *NodeRPC { break } - logger.Warning("Waiting node %s up 30 second...", baseURL) + logger.Warning().Msgf("Waiting node %s up 30 second...", baseURL) time.Sleep(time.Second * 30) } return node @@ -106,7 +106,7 @@ func (rpc *NodeRPC) makeRequest(req *http.Request) (*http.Response, error) { for ; count < rpc.retryCount; count++ { resp, err := client.Do(req) if err != nil { - logger.Warning("Attempt #%d: %s", count+1, err.Error()) + logger.Warning().Msgf("Attempt #%d: %s", count+1, err.Error()) continue } return resp, err diff --git a/internal/parsers/ledger/ledger.go b/internal/parsers/ledger/ledger.go index 2f78e184f..f2bfa2323 100644 --- a/internal/parsers/ledger/ledger.go +++ b/internal/parsers/ledger/ledger.go @@ -149,7 +149,7 @@ func (ledger *Ledger) makeTransfer(tb tokenbalance.TokenBalance, st *stacktrace. 
balance, err := ledger.tokenBalances.Get(op.Network, op.Destination, tb.Address, tb.TokenID) if err != nil { - logger.Error(err) + logger.Err(err) return nil } diff --git a/internal/parsers/operations/migration.go b/internal/parsers/operations/migration.go index d34129c79..310e7ff93 100644 --- a/internal/parsers/operations/migration.go +++ b/internal/parsers/operations/migration.go @@ -54,7 +54,7 @@ func (m Migration) Parse(data noderpc.Operation, operation *operation.Operation) Hash: operation.Hash, Kind: types.MigrationKindLambda, } - logger.With(migration).Info("Migration detected") + logger.Info().Fields(migration.LogFields()).Msg("Migration detected") return migration, nil } } diff --git a/internal/parsers/operations/origination.go b/internal/parsers/operations/origination.go index be943f5b9..997a8fefd 100644 --- a/internal/parsers/operations/origination.go +++ b/internal/parsers/operations/origination.go @@ -110,7 +110,7 @@ func (p Origination) appliedHandler(item noderpc.Operation, origination *operati if err := p.executeInitialStorageEvent(item.Script, origination, result); err != nil { if !errors.Is(err, tokens.ErrNoMetadataKeyInStorage) { - logger.Error(err) + logger.Err(err) } } diff --git a/internal/parsers/operations/test_common.go b/internal/parsers/operations/test_common.go index 9c739abdb..3a2adc7c5 100644 --- a/internal/parsers/operations/test_common.go +++ b/internal/parsers/operations/test_common.go @@ -106,47 +106,47 @@ func compareParserResponse(t *testing.T, got, want *parsers.Result) bool { func compareTransfers(one, two *transfer.Transfer) bool { if one.Network != two.Network { - logger.Info("Network: %s != %s", one.Network, two.Network) + logger.Info().Msgf("Network: %s != %s", one.Network, two.Network) return false } if one.Contract != two.Contract { - logger.Info("Contract: %s != %s", one.Contract, two.Contract) + logger.Info().Msgf("Contract: %s != %s", one.Contract, two.Contract) return false } if one.Initiator != two.Initiator { - logger.Info("Initiator: %s != %s", one.Initiator, two.Initiator) + logger.Info().Msgf("Initiator: %s != %s", one.Initiator, two.Initiator) return false } if one.Status != two.Status { - logger.Info("Status: %s != %s", one.Status, two.Status) + logger.Info().Msgf("Status: %s != %s", one.Status, two.Status) return false } if one.Timestamp != two.Timestamp { - logger.Info("Timestamp: %s != %s", one.Timestamp, two.Timestamp) + logger.Info().Msgf("Timestamp: %s != %s", one.Timestamp, two.Timestamp) return false } if one.Level != two.Level { - logger.Info("Level: %d != %d", one.Level, two.Level) + logger.Info().Msgf("Level: %d != %d", one.Level, two.Level) return false } if one.From != two.From { - logger.Info("From: %s != %s", one.From, two.From) + logger.Info().Msgf("From: %s != %s", one.From, two.From) return false } if one.To != two.To { - logger.Info("To: %s != %s", one.To, two.To) + logger.Info().Msgf("To: %s != %s", one.To, two.To) return false } if one.TokenID != two.TokenID { - logger.Info("TokenID: %d != %d", one.TokenID, two.TokenID) + logger.Info().Msgf("TokenID: %d != %d", one.TokenID, two.TokenID) return false } if one.Amount.Cmp(two.Amount) != 0 { - logger.Info("Amount: %s != %s", one.Amount.String(), two.Amount.String()) + logger.Info().Msgf("Amount: %s != %s", one.Amount.String(), two.Amount.String()) return false } if one.OperationID != two.OperationID { - logger.Info("OperationID: %d != %d", one.OperationID, two.OperationID) + logger.Info().Msgf("OperationID: %d != %d", one.OperationID, two.OperationID) return false } 
return true @@ -154,112 +154,112 @@ func compareTransfers(one, two *transfer.Transfer) bool { func compareOperations(t *testing.T, one, two *operation.Operation) bool { if one.Internal != two.Internal { - logger.Info("Internal: %v != %v", one.Internal, two.Internal) + logger.Info().Msgf("Internal: %v != %v", one.Internal, two.Internal) return false } if !compareInt64Ptr(one.Nonce, two.Nonce) { - logger.Info("Operation.Nonce: %d != %d", *one.Nonce, *two.Nonce) + logger.Info().Msgf("Operation.Nonce: %d != %d", *one.Nonce, *two.Nonce) return false } if one.Timestamp != two.Timestamp { - logger.Info("Timestamp: %s != %s", one.Timestamp, two.Timestamp) + logger.Info().Msgf("Timestamp: %s != %s", one.Timestamp, two.Timestamp) return false } if one.Level != two.Level { - logger.Info("Level: %d != %d", one.Level, two.Level) + logger.Info().Msgf("Level: %d != %d", one.Level, two.Level) return false } if one.ContentIndex != two.ContentIndex { - logger.Info("ContentIndex: %d != %d", one.ContentIndex, two.ContentIndex) + logger.Info().Msgf("ContentIndex: %d != %d", one.ContentIndex, two.ContentIndex) return false } if one.Counter != two.Counter { - logger.Info("Counter: %d != %d", one.Counter, two.Counter) + logger.Info().Msgf("Counter: %d != %d", one.Counter, two.Counter) return false } if one.GasLimit != two.GasLimit { - logger.Info("GasLimit: %d != %d", one.GasLimit, two.GasLimit) + logger.Info().Msgf("GasLimit: %d != %d", one.GasLimit, two.GasLimit) return false } if one.StorageLimit != two.StorageLimit { - logger.Info("StorageLimit: %d != %d", one.StorageLimit, two.StorageLimit) + logger.Info().Msgf("StorageLimit: %d != %d", one.StorageLimit, two.StorageLimit) return false } if one.Fee != two.Fee { - logger.Info("Fee: %d != %d", one.Fee, two.Fee) + logger.Info().Msgf("Fee: %d != %d", one.Fee, two.Fee) return false } if one.Amount != two.Amount { - logger.Info("Amount: %d != %d", one.Amount, two.Amount) + logger.Info().Msgf("Amount: %d != %d", one.Amount, two.Amount) return false } if one.Burned != two.Burned { - logger.Info("Burned: %d != %d", one.Burned, two.Burned) + logger.Info().Msgf("Burned: %d != %d", one.Burned, two.Burned) return false } if one.AllocatedDestinationContractBurned != two.AllocatedDestinationContractBurned { - logger.Info("AllocatedDestinationContractBurned: %d != %d", one.AllocatedDestinationContractBurned, two.AllocatedDestinationContractBurned) + logger.Info().Msgf("AllocatedDestinationContractBurned: %d != %d", one.AllocatedDestinationContractBurned, two.AllocatedDestinationContractBurned) return false } if one.Network != two.Network { - logger.Info("Network: %s != %s", one.Network, two.Network) + logger.Info().Msgf("Network: %s != %s", one.Network, two.Network) return false } if one.ProtocolID != two.ProtocolID { - logger.Info("Protocol: %d != %d", one.ProtocolID, two.ProtocolID) + logger.Info().Msgf("Protocol: %d != %d", one.ProtocolID, two.ProtocolID) return false } if one.Hash != two.Hash { - logger.Info("Hash: %s != %s", one.Hash, two.Hash) + logger.Info().Msgf("Hash: %s != %s", one.Hash, two.Hash) return false } if one.Status != two.Status { - logger.Info("Status: %s != %s", one.Status, two.Status) + logger.Info().Msgf("Status: %s != %s", one.Status, two.Status) return false } if one.Kind != two.Kind { - logger.Info("Kind: %s != %s", one.Kind, two.Kind) + logger.Info().Msgf("Kind: %s != %s", one.Kind, two.Kind) return false } if one.Initiator != two.Initiator { - logger.Info("Initiator: %s != %s", one.Initiator, two.Initiator) + logger.Info().Msgf("Initiator: %s != 
%s", one.Initiator, two.Initiator) return false } if one.Source != two.Source { - logger.Info("Source: %s != %s", one.Source, two.Source) + logger.Info().Msgf("Source: %s != %s", one.Source, two.Source) return false } if one.Destination != two.Destination { - logger.Info("Destination: %s != %s", one.Destination, two.Destination) + logger.Info().Msgf("Destination: %s != %s", one.Destination, two.Destination) return false } if one.Delegate != two.Delegate { - logger.Info("Delegate: %s != %s", one.Delegate, two.Delegate) + logger.Info().Msgf("Delegate: %s != %s", one.Delegate, two.Delegate) return false } if one.Entrypoint != two.Entrypoint { - logger.Info("Entrypoint: %s != %s", one.Entrypoint, two.Entrypoint) + logger.Info().Msgf("Entrypoint: %s != %s", one.Entrypoint, two.Entrypoint) return false } if len(one.Parameters) > 0 && len(two.Parameters) > 0 { if !assert.JSONEq(t, string(one.Parameters), string(two.Parameters)) { - logger.Info("Parameters: %s != %s", one.Parameters, two.Parameters) + logger.Info().Msgf("Parameters: %s != %s", one.Parameters, two.Parameters) return false } } if len(one.DeffatedStorage) > 0 && len(two.DeffatedStorage) > 0 { if !assert.JSONEq(t, string(one.DeffatedStorage), string(two.DeffatedStorage)) { - logger.Info("DeffatedStorage: %s != %s", one.DeffatedStorage, two.DeffatedStorage) + logger.Info().Msgf("DeffatedStorage: %s != %s", one.DeffatedStorage, two.DeffatedStorage) return false } } if one.Tags != two.Tags { - logger.Info("Tags: %d != %d", one.Tags, two.Tags) + logger.Info().Msgf("Tags: %d != %d", one.Tags, two.Tags) return false } if len(one.Transfers) != len(two.Transfers) { - logger.Info("Transfers length: %d != %d", len(one.Transfers), len(two.Transfers)) + logger.Info().Msgf("Transfers length: %d != %d", len(one.Transfers), len(two.Transfers)) return false } @@ -272,7 +272,7 @@ func compareOperations(t *testing.T, one, two *operation.Operation) bool { } if len(one.BigMapDiffs) != len(two.BigMapDiffs) { - logger.Info("BigMapDiffs length: %d != %d", len(one.BigMapDiffs), len(two.BigMapDiffs)) + logger.Info().Msgf("BigMapDiffs length: %d != %d", len(one.BigMapDiffs), len(two.BigMapDiffs)) return false } @@ -289,11 +289,11 @@ func compareOperations(t *testing.T, one, two *operation.Operation) bool { func compareBigMapDiff(t *testing.T, one, two *bigmapdiff.BigMapDiff) bool { if one.Contract != two.Contract { - logger.Info("BigMapDiff.Address: %s != %s", one.Contract, two.Contract) + logger.Info().Msgf("BigMapDiff.Address: %s != %s", one.Contract, two.Contract) return false } if one.KeyHash != two.KeyHash { - logger.Info("KeyHash: %s != %s", one.KeyHash, two.KeyHash) + logger.Info().Msgf("KeyHash: %s != %s", one.KeyHash, two.KeyHash) return false } if len(one.Value) > 0 || len(two.Value) > 0 { @@ -302,41 +302,41 @@ func compareBigMapDiff(t *testing.T, one, two *bigmapdiff.BigMapDiff) bool { } } if one.Level != two.Level { - logger.Info("Level: %d != %d", one.Level, two.Level) + logger.Info().Msgf("Level: %d != %d", one.Level, two.Level) return false } if one.Network != two.Network { - logger.Info("Network: %s != %s", one.Network, two.Network) + logger.Info().Msgf("Network: %s != %s", one.Network, two.Network) return false } if one.Timestamp != two.Timestamp { - logger.Info("Timestamp: %s != %s", one.Timestamp, two.Timestamp) + logger.Info().Msgf("Timestamp: %s != %s", one.Timestamp, two.Timestamp) return false } if one.ProtocolID != two.ProtocolID { - logger.Info("Protocol: %d != %d", one.ProtocolID, two.ProtocolID) + logger.Info().Msgf("Protocol: %d != 
%d", one.ProtocolID, two.ProtocolID) return false } if !assert.JSONEq(t, string(one.KeyBytes()), string(two.KeyBytes())) { return false } if len(one.KeyStrings) != len(two.KeyStrings) { - logger.Info("KeyStrings: %v != %v", one.KeyStrings, two.KeyStrings) + logger.Info().Msgf("KeyStrings: %v != %v", one.KeyStrings, two.KeyStrings) return false } for i := range one.KeyStrings { if one.KeyStrings[i] != two.KeyStrings[i] { - logger.Info("KeyStrings[i]: %v != %v", one.KeyStrings[i], two.KeyStrings[i]) + logger.Info().Msgf("KeyStrings[i]: %v != %v", one.KeyStrings[i], two.KeyStrings[i]) return false } } if len(one.ValueStrings) != len(two.ValueStrings) { - logger.Info("ValueStrings: %v != %v", one.ValueStrings, two.ValueStrings) + logger.Info().Msgf("ValueStrings: %v != %v", one.ValueStrings, two.ValueStrings) return false } for i := range one.ValueStrings { if one.ValueStrings[i] != two.ValueStrings[i] { - logger.Info("ValueStrings[i]: %v != %v", one.ValueStrings[i], two.ValueStrings[i]) + logger.Info().Msgf("ValueStrings[i]: %v != %v", one.ValueStrings[i], two.ValueStrings[i]) return false } } @@ -345,31 +345,31 @@ func compareBigMapDiff(t *testing.T, one, two *bigmapdiff.BigMapDiff) bool { func compareBigMapAction(one, two *bigmapaction.BigMapAction) bool { if one.Action != two.Action { - logger.Info("Action: %s != %s", one.Action, two.Action) + logger.Info().Msgf("Action: %s != %s", one.Action, two.Action) return false } if !compareInt64Ptr(one.SourcePtr, two.SourcePtr) { - logger.Info("SourcePtr: %d != %d", *one.SourcePtr, *two.SourcePtr) + logger.Info().Msgf("SourcePtr: %d != %d", *one.SourcePtr, *two.SourcePtr) return false } if !compareInt64Ptr(one.DestinationPtr, two.DestinationPtr) { - logger.Info("DestinationPtr: %d != %d", *one.DestinationPtr, *two.DestinationPtr) + logger.Info().Msgf("DestinationPtr: %d != %d", *one.DestinationPtr, *two.DestinationPtr) return false } if one.Level != two.Level { - logger.Info("Level: %d != %d", one.Level, two.Level) + logger.Info().Msgf("Level: %d != %d", one.Level, two.Level) return false } if one.Address != two.Address { - logger.Info("BigMapAction.Address: %s != %s", one.Address, two.Address) + logger.Info().Msgf("BigMapAction.Address: %s != %s", one.Address, two.Address) return false } if one.Network != two.Network { - logger.Info("Network: %s != %s", one.Network, two.Network) + logger.Info().Msgf("Network: %s != %s", one.Network, two.Network) return false } if one.Timestamp != two.Timestamp { - logger.Info("Timestamp: %s != %s", one.Timestamp, two.Timestamp) + logger.Info().Msgf("Timestamp: %s != %s", one.Timestamp, two.Timestamp) return false } return true @@ -377,39 +377,39 @@ func compareBigMapAction(one, two *bigmapaction.BigMapAction) bool { func compareContract(one, two *contract.Contract) bool { if one.Network != two.Network { - logger.Info("Contract.Network: %s != %s", one.Network, two.Network) + logger.Info().Msgf("Contract.Network: %s != %s", one.Network, two.Network) return false } if one.Address != two.Address { - logger.Info("Contract.Address: %s != %s", one.Address, two.Address) + logger.Info().Msgf("Contract.Address: %s != %s", one.Address, two.Address) return false } if one.Language != two.Language { - logger.Info("Contract.Language: %s != %s", one.Language, two.Language) + logger.Info().Msgf("Contract.Language: %s != %s", one.Language, two.Language) return false } if one.Hash != two.Hash { - logger.Info("Contract.Hash: %s != %s", one.Hash, two.Hash) + logger.Info().Msgf("Contract.Hash: %s != %s", one.Hash, two.Hash) return 
false } if one.Manager != two.Manager { - logger.Info("Contract.Manager: %s != %s", one.Manager, two.Manager) + logger.Info().Msgf("Contract.Manager: %s != %s", one.Manager, two.Manager) return false } if one.Level != two.Level { - logger.Info("Contract.Level: %d != %d", one.Level, two.Level) + logger.Info().Msgf("Contract.Level: %d != %d", one.Level, two.Level) return false } if one.Timestamp != two.Timestamp { - logger.Info("Contract.Timestamp: %s != %s", one.Timestamp, two.Timestamp) + logger.Info().Msgf("Contract.Timestamp: %s != %s", one.Timestamp, two.Timestamp) return false } if one.Tags != two.Tags { - logger.Info("Contract.Tags: %d != %d", one.Tags, two.Tags) + logger.Info().Msgf("Contract.Tags: %d != %d", one.Tags, two.Tags) return false } if !compareStringArray(one.Entrypoints, two.Entrypoints) { - logger.Info("Contract.Entrypoints: %v != %v", one.Entrypoints, two.Entrypoints) + logger.Info().Msgf("Contract.Entrypoints: %v != %v", one.Entrypoints, two.Entrypoints) return false } return true diff --git a/internal/parsers/operations/transaction.go b/internal/parsers/operations/transaction.go index 3468c3c61..f72e23e1b 100644 --- a/internal/parsers/operations/transaction.go +++ b/internal/parsers/operations/transaction.go @@ -99,7 +99,7 @@ func (p Transaction) Parse(data noderpc.Operation) (*parsers.Result, error) { if !errors.Is(err, noderpc.InvalidNodeResponse{}) { return nil, err } - logger.With(&tx).Warning(err.Error()) + logger.Warning().Err(err).Msg("") } result.TokenBalances = append(result.TokenBalances, transferParsers.UpdateTokenBalances(tx.Transfers)...) } diff --git a/internal/parsers/stacktrace/stacktrace.go b/internal/parsers/stacktrace/stacktrace.go index 90507a404..4da09eaba 100644 --- a/internal/parsers/stacktrace/stacktrace.go +++ b/internal/parsers/stacktrace/stacktrace.go @@ -151,7 +151,7 @@ func (st *StackTrace) String() string { } if err := st.print(topLevel, 1, &builder); err != nil { - logger.Error(err) + logger.Err(err) } return builder.String() } diff --git a/internal/parsers/transfer/transfer.go b/internal/parsers/transfer/transfer.go index 865dc9bd7..47eaeaa52 100644 --- a/internal/parsers/transfer/transfer.go +++ b/internal/parsers/transfer/transfer.go @@ -170,7 +170,7 @@ func (p *Parser) executeEvents(impl tzip.EventImplementation, name, protocol str func (p *Parser) makeTransfersFromBalanceEvents(event events.Event, ctx events.Context, operation *operation.Operation, isDelta bool) error { balances, err := events.Execute(p.rpc, event, ctx) if err != nil { - logger.Errorf("Event of %s %s: %s", operation.Network, operation.Destination, err.Error()) + logger.Error().Msgf("Event of %s %s: %s", operation.Network, operation.Destination, err.Error()) return nil } diff --git a/internal/parsers/tzip/parser.go b/internal/parsers/tzip/parser.go index b351e04ae..07ea95198 100644 --- a/internal/parsers/tzip/parser.go +++ b/internal/parsers/tzip/parser.go @@ -85,12 +85,12 @@ func (p *Parser) Parse(ctx ParseContext) (*tzip.TZIP, error) { if err := s.Get(ctx.BigMapDiff.Network, ctx.BigMapDiff.Contract, decoded, ctx.BigMapDiff.Ptr, data); err != nil { switch { case errors.Is(err, tzipStorage.ErrHTTPRequest) || errors.Is(err, tzipStorage.ErrJSONDecoding) || errors.Is(err, tzipStorage.ErrUnknownStorageType): - logger.With(&ctx.BigMapDiff).WithField("kind", "contract_metadata").Warning(err) + logger.Warning().Fields(ctx.BigMapDiff.LogFields()).Str("kind", "contract_metadata").Err(err).Msg("") return nil, nil case errors.Is(err, tzipStorage.ErrNoIPFSResponse): 
data.Description = fmt.Sprintf("Failed to fetch metadata %s", decoded) data.Name = consts.Unknown - logger.WithField("url", decoded).WithField("kind", "contract_metadata").Warning(err) + logger.Warning().Str("url", decoded).Str("kind", "contract_metadata").Err(err).Msg("") default: return nil, err } diff --git a/internal/parsers/tzip/tokens/metadata.go b/internal/parsers/tzip/tokens/metadata.go index 815eda158..96957b16e 100644 --- a/internal/parsers/tzip/tokens/metadata.go +++ b/internal/parsers/tzip/tokens/metadata.go @@ -322,12 +322,12 @@ func (m *TokenMetadata) UnmarshalJSON(data []byte) error { case string: int64Val, err := strconv.ParseInt(decimals, 10, 64) if err != nil { - logger.Errorf("TokenMetadata decimal Unmarshal error with string. Got %##v %T", res[keyDecimals], val) + logger.Error().Msgf("TokenMetadata decimal Unmarshal error with string. Got %##v %T", res[keyDecimals], val) } else { m.Decimals = &int64Val } default: - logger.Errorf("TokenMetadata decimal Unmarshal error. Wanted float64, int64 or (>_<) string, got %##v %T", res[keyDecimals], val) + logger.Error().Msgf("TokenMetadata decimal Unmarshal error. Wanted float64, int64 or (>_<) string, got %##v %T", res[keyDecimals], val) } delete(res, keyDecimals) } diff --git a/internal/parsers/tzip/tokens/parser.go b/internal/parsers/tzip/tokens/parser.go index d27c60890..70d28bd5c 100644 --- a/internal/parsers/tzip/tokens/parser.go +++ b/internal/parsers/tzip/tokens/parser.go @@ -85,11 +85,11 @@ func (t Parser) ParseBigMapDiff(bmd *domains.BigMapDiff, storage *ast.TypedAst) if err := s.Get(t.network, bmd.Contract, m.Link, bmd.Ptr, remoteMetadata); err != nil { switch { case errors.Is(err, tzipStorage.ErrHTTPRequest): - logger.WithField("url", m.Link).WithField("kind", "token_metadata").Warning(err) + logger.Warning().Str("url", m.Link).Str("kind", "token_metadata").Err(err).Msg("") return nil, nil case errors.Is(err, tzipStorage.ErrNoIPFSResponse): remoteMetadata.Name = consts.Unknown - logger.WithField("url", m.Link).WithField("kind", "token_metadata").Warning(err) + logger.Warning().Str("url", m.Link).Str("kind", "token_metadata").Err(err).Msg("") default: return nil, err } @@ -140,7 +140,7 @@ func (t Parser) parse(address string, state block.Block) ([]tokenmetadata.TokenM remoteMetadata := &TokenMetadata{} if err := s.Get(t.network, address, m.Link, ptr, remoteMetadata); err != nil { if errors.Is(err, tzipStorage.ErrHTTPRequest) { - logger.Error(err) + logger.Err(err) return nil, nil } return nil, err diff --git a/internal/postgres/core/postgres.go b/internal/postgres/core/postgres.go index 0469e42da..d040da8b8 100644 --- a/internal/postgres/core/postgres.go +++ b/internal/postgres/core/postgres.go @@ -65,7 +65,7 @@ func WaitNew(connectionString, appName string, timeout int, opts ...PostgresOpti for db == nil { db, err = New(connectionString, appName, opts...) 
if err != nil { - bcdLogger.Warning("Waiting postgres up %d seconds...", timeout) + bcdLogger.Warning().Msgf("Waiting postgres up %d seconds...", timeout) time.Sleep(time.Second * time.Duration(timeout)) } } diff --git a/internal/rollback/rollback.go b/internal/rollback/rollback.go index a9f0ef130..6cd4d6ec1 100644 --- a/internal/rollback/rollback.go +++ b/internal/rollback/rollback.go @@ -44,7 +44,7 @@ func (rm Manager) Rollback(db *gorm.DB, fromState block.Block, toLevel int64) er } for level := fromState.Level - 1; level >= toLevel; level-- { - logger.Info("Rollback to %d block", level) + logger.Info().Msgf("Rollback to %d block", level) err := db.Transaction(func(tx *gorm.DB) error { if err := rm.rollbackTokenBalances(tx, fromState.Network, level); err != nil { diff --git a/internal/tzkt/request.go b/internal/tzkt/request.go index 121f4f90c..de8b11787 100644 --- a/internal/tzkt/request.go +++ b/internal/tzkt/request.go @@ -53,7 +53,7 @@ func (t *TzKT) request(method, endpoint string, params map[string]string, respon count := 0 for ; count < t.retryCount; count++ { if resp, err = t.client.Do(req); err != nil { - logger.Warning("Attempt #%d: %s", count+1, err.Error()) + logger.Warning().Msgf("Attempt #%d: %s", count+1, err.Error()) continue } break diff --git a/internal/tzkt/services.go b/internal/tzkt/services.go index 9d07fc42f..5f814eb40 100644 --- a/internal/tzkt/services.go +++ b/internal/tzkt/services.go @@ -50,7 +50,7 @@ func (t *ServicesTzKT) request(method, endpoint string, params map[string]string count := 0 for ; count < t.retryCount; count++ { if resp, err = t.client.Do(req); err != nil { - logger.Warning("Attempt #%d: %s", count+1, err.Error()) + logger.Warning().Msgf("Attempt #%d: %s", count+1, err.Error()) continue } break diff --git a/scripts/api_tester/account.go b/scripts/api_tester/account.go index dd58e7626..373790846 100644 --- a/scripts/api_tester/account.go +++ b/scripts/api_tester/account.go @@ -21,17 +21,17 @@ func testAccounts(ctx *config.Context) { for _, address := range tokenContracts { balances, err := ctx.TokenBalances.GetHolders(types.Mainnet, address, 0) if err != nil { - logger.Error(err) + logger.Err(err) return } for i := range balances { path := fmt.Sprintf("account/mainnet/%s", balances[i].Address) if err := request(path); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/token_balances", path)); err != nil { - logger.Error(err) + logger.Err(err) } } } diff --git a/scripts/api_tester/bigmapdiff.go b/scripts/api_tester/bigmapdiff.go index dba5d26bc..997cdf7ae 100644 --- a/scripts/api_tester/bigmapdiff.go +++ b/scripts/api_tester/bigmapdiff.go @@ -12,16 +12,16 @@ func testBigMapDiff(ctx *config.Context) { for _, network := range ctx.Config.API.Networks { prefix := fmt.Sprintf("bigmap/%s/%d", network, ptr) if err := request(prefix); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/count", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/history", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/keys", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } } } diff --git a/scripts/api_tester/contract.go b/scripts/api_tester/contract.go index 8aa2d3d19..d819d0961 100644 --- a/scripts/api_tester/contract.go +++ b/scripts/api_tester/contract.go @@ -17,20 +17,20 @@ func testContracts(ctx *config.Context) { offset := int64(0) for _, network := range ctx.Config.API.Networks { - 
logger.Info("testing %s contract endpoints...", network) + logger.Info().Msgf("testing %s contract endpoints...", network) contracts, err := ctx.Contracts.GetMany(map[string]interface{}{ "network": network, }) if err != nil { - logger.Errorf("testContracts: %s", err.Error()) + logger.Error().Msgf("testContracts: %s", err.Error()) return } total := len(contracts) contracts = contracts[offset:] - logger.Info("testing %d contracts...", len(contracts)) + logger.Info().Msgf("testing %d contracts...", len(contracts)) if len(contracts) == 0 { return } @@ -92,38 +92,38 @@ func testContract(tasks chan contract.Contract, stop chan struct{}, counter *int prefix := fmt.Sprintf("contract/%s/%s", contract.Network, contract.Address) if err := request(prefix); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/code", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/operations", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/migrations", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/transfers", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/tokens", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/storage", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/same", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/similar", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("%s/entrypoints", prefix)); err != nil { - logger.Error(err) + logger.Err(err) } for i := range contract.Entrypoints { if err := request(fmt.Sprintf("%s/entrypoints/schema?entrypoint=%s", prefix, contract.Entrypoints[i])); err != nil { - logger.Error(err) + logger.Err(err) } } atomic.AddInt64(counter, 1) diff --git a/scripts/api_tester/general.go b/scripts/api_tester/general.go index 40426d6a9..79b43fe1b 100644 --- a/scripts/api_tester/general.go +++ b/scripts/api_tester/general.go @@ -9,24 +9,24 @@ import ( func testGeneral(ctx *config.Context) { if err := request("head"); err != nil { - logger.Error(err) + logger.Err(err) } if err := request("config"); err != nil { - logger.Error(err) + logger.Err(err) } if err := request("pick_random"); err != nil { - logger.Error(err) + logger.Err(err) } if err := request("stats"); err != nil { - logger.Error(err) + logger.Err(err) } for _, network := range ctx.Config.API.Networks { if err := request(fmt.Sprintf("stats/%s", network)); err != nil { - logger.Error(err) + logger.Err(err) } if err := request(fmt.Sprintf("tokens/%s", network)); err != nil { - logger.Error(err) + logger.Err(err) } } } diff --git a/scripts/api_tester/main.go b/scripts/api_tester/main.go index 4d999df73..b46788282 100644 --- a/scripts/api_tester/main.go +++ b/scripts/api_tester/main.go @@ -17,7 +17,8 @@ var ( func main() { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } ctx := config.NewContext( diff --git a/scripts/bcdctl/main.go b/scripts/bcdctl/main.go index 9ce344d4b..77b215e93 100644 --- a/scripts/bcdctl/main.go +++ b/scripts/bcdctl/main.go @@ -21,7 +21,8 @@ type awsData struct { func main() { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } creds = awsData{ @@ -45,42 +46,48 @@ func main() { "Rollback 
state", "Rollback network state to certain level", &rollbackCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.AddCommand("create_repository", "Create repository", "Create repository", &createRepoCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.AddCommand("snapshot", "Create snapshot", "Create snapshot", &snapshotCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.AddCommand("restore", "Restore snapshot", "Restore snapshot", &restoreCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.AddCommand("set_policy", "Set policy", "Set elastic snapshot policy", &setPolicyCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.AddCommand("reload_secure_settings", "Reload secure settings", "Reload secure settings", &reloadSecureSettingsCmd); err != nil { - logger.Fatal(err) + logger.Err(err) + return } if _, err := parser.Parse(); err != nil { @@ -99,7 +106,7 @@ func yes() bool { } func askQuestion(question string) (string, error) { - logger.Warning(question) + logger.Warning().Msg(question) reader := bufio.NewReader(os.Stdin) text, err := reader.ReadString('\n') diff --git a/scripts/bcdctl/rollback.go b/scripts/bcdctl/rollback.go index 809c9efa0..5e68e814d 100644 --- a/scripts/bcdctl/rollback.go +++ b/scripts/bcdctl/rollback.go @@ -20,9 +20,9 @@ func (x *rollbackCommand) Execute(_ []string) error { panic(err) } - logger.Warning("Do you want to rollback '%s' from %d to %d? (yes - continue. no - cancel)", state.Network.String(), state.Level, x.Level) + logger.Warning().Msgf("Do you want to rollback '%s' from %d to %d? (yes - continue. no - cancel)", state.Network.String(), state.Level, x.Level) if !yes() { - logger.Info("Cancelled") + logger.Info().Msg("Cancelled") return nil } @@ -30,7 +30,7 @@ func (x *rollbackCommand) Execute(_ []string) error { if err = manager.Rollback(ctx.StorageDB.DB, state, x.Level); err != nil { return err } - logger.Info("Done") + logger.Info().Msg("Done") return nil } diff --git a/scripts/migration/main.go b/scripts/migration/main.go index 5ec12a4ac..6f15224d5 100644 --- a/scripts/migration/main.go +++ b/scripts/migration/main.go @@ -40,12 +40,14 @@ var migrationsList = []migrations.Migration{ func main() { migration, err := chooseMigration() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } start := time.Now() @@ -59,13 +61,14 @@ func main() { ) defer ctx.Close() - logger.Info("Starting %v migration...", migration.Key()) + logger.Info().Msgf("Starting %v migration...", migration.Key()) if err := migration.Do(ctx); err != nil { - logger.Fatal(err) + logger.Err(err) + return } - logger.Info("%s migration done. Spent: %v", migration.Key(), time.Since(start)) + logger.Info().Msgf("%s migration done. 
Spent: %v", migration.Key(), time.Since(start)) } func chooseMigration() (migrations.Migration, error) { diff --git a/scripts/migration/migrations/big_map_action_to_enum.go b/scripts/migration/migrations/big_map_action_to_enum.go index a5a5cfeea..0adf7ad00 100644 --- a/scripts/migration/migrations/big_map_action_to_enum.go +++ b/scripts/migration/migrations/big_map_action_to_enum.go @@ -31,17 +31,17 @@ func (m *BigMapActionToEnum) Do(ctx *config.Context) error { return nil } - logger.Info("renaming 'action' column to 'old_action'...") + logger.Info().Msg("renaming 'action' column to 'old_action'...") if err := migrator.RenameColumn(model, "action", "old_action"); err != nil { return err } - logger.Info("creating new 'action' column...") + logger.Info().Msg("creating new 'action' column...") if err := migrator.AddColumn(model, "action"); err != nil { return err } - logger.Info("setting 'action' column value...") + logger.Info().Msg("setting 'action' column value...") for _, action := range []types.BigMapAction{ types.BigMapActionAlloc, types.BigMapActionCopy, types.BigMapActionRemove, types.BigMapActionUpdate, } { @@ -49,7 +49,7 @@ func (m *BigMapActionToEnum) Do(ctx *config.Context) error { return err } } - logger.Info("removing 'old_action' column...") + logger.Info().Msg("removing 'old_action' column...") return migrator.DropColumn(model, "old_action") }) diff --git a/scripts/migration/migrations/big_russian_boss.go b/scripts/migration/migrations/big_russian_boss.go index 819ee6aa4..3c5998acb 100644 --- a/scripts/migration/migrations/big_russian_boss.go +++ b/scripts/migration/migrations/big_russian_boss.go @@ -85,13 +85,13 @@ func (m *BigRussianBoss) eventsAndTokenBalances(ctx *config.Context) error { return nil } - logger.Info("executing all extended storages") + logger.Info().Msg("executing all extended storages") extStorageEvents := new(ExtendedStorageEvents) if err := extStorageEvents.Do(ctx); err != nil { return err } - logger.Info("executing all parameter events") + logger.Info().Msg("executing all parameter events") parameterEvents := new(ParameterEvents) if err := parameterEvents.Do(ctx); err != nil { return err @@ -107,7 +107,7 @@ func (m *BigRussianBoss) eventsAndTokenBalances(ctx *config.Context) error { } } - logger.Info("Found %v affected contracts. Starting token balance recalculation", len(uniqueContracts)) + logger.Info().Msgf("Found %v affected contracts. 
Starting token balance recalculation", len(uniqueContracts)) if err := new(TokenBalanceRecalc).DoBatch(ctx, uniqueContracts); err != nil { return err } diff --git a/scripts/migration/migrations/create_transfers.go b/scripts/migration/migrations/create_transfers.go index 5359829cf..a6f926ada 100644 --- a/scripts/migration/migrations/create_transfers.go +++ b/scripts/migration/migrations/create_transfers.go @@ -30,7 +30,7 @@ func (m *CreateTransfersTags) Description() string { // Do - migrate function func (m *CreateTransfersTags) Do(ctx *config.Context) error { - logger.Info("Starting create transfer migration...") + logger.Info().Msg("Starting create transfer migration...") if err := m.deleteTransfers(ctx); err != nil { return err } @@ -39,7 +39,7 @@ func (m *CreateTransfersTags) Do(ctx *config.Context) error { if err != nil { return err } - logger.Info("Found %d operations with transfer entrypoint", len(operations)) + logger.Info().Msgf("Found %d operations with transfer entrypoint", len(operations)) result := make([]models.Model, 0) newTransfers := make([]*transfer.Transfer, 0) diff --git a/scripts/migration/migrations/create_tzip.go b/scripts/migration/migrations/create_tzip.go index 4cf312991..dabbcc7b4 100644 --- a/scripts/migration/migrations/create_tzip.go +++ b/scripts/migration/migrations/create_tzip.go @@ -29,7 +29,7 @@ func (m *CreateTZIP) Do(ctx *config.Context) error { return err } - logger.Info("Found %d big maps with empty key", len(bmd)) + logger.Info().Msgf("Found %d big maps with empty key", len(bmd)) data := make([]models.Model, 0) bar := progressbar.NewOptions(len(bmd), progressbar.OptionSetPredictTime(false), progressbar.OptionClearOnFinish(), progressbar.OptionShowCount()) diff --git a/scripts/migration/migrations/enum_to_smallint.go b/scripts/migration/migrations/enum_to_smallint.go index b984c21de..8112f0752 100644 --- a/scripts/migration/migrations/enum_to_smallint.go +++ b/scripts/migration/migrations/enum_to_smallint.go @@ -38,7 +38,7 @@ func (m *EnumToSmallInt) Do(ctx *config.Context) error { return ctx.StorageDB.DB.Transaction(func(tx *gorm.DB) error { migrator := tx.Migrator() - logger.Info("drop materialized view: head_stats") + logger.Info().Msg("drop materialized view: head_stats") if err := tx.Exec("DROP MATERIALIZED VIEW IF EXISTS head_stats;").Error; err != nil { return err } @@ -48,7 +48,7 @@ func (m *EnumToSmallInt) Do(ctx *config.Context) error { "series_consumed_gas_by_month_%s", "series_contract_by_month_%s", "series_operation_by_month_%s", "series_paid_storage_size_diff_by_month_%s", } { name := fmt.Sprintf(view, network) - logger.Info("drop materialized view: %s", name) + logger.Info().Msgf("drop materialized view: %s", name) if err := tx.Exec(fmt.Sprintf("DROP MATERIALIZED VIEW IF EXISTS %s;", name)).Error; err != nil { return err } @@ -104,7 +104,7 @@ func (m *EnumToSmallInt) Do(ctx *config.Context) error { func (m *EnumToSmallInt) alterColumn(migrator gorm.Migrator, model interface{}, column string) error { if data, ok := model.(models.Model); ok { - logger.Info("Migrating column '%s' of '%s'", column, data.GetIndex()) + logger.Info().Msgf("Migrating column '%s' of '%s'", column, data.GetIndex()) } if !migrator.HasColumn(model, column) { diff --git a/scripts/migration/migrations/extended_storage_events.go b/scripts/migration/migrations/extended_storage_events.go index edc070488..3c6784302 100644 --- a/scripts/migration/migrations/extended_storage_events.go +++ b/scripts/migration/migrations/extended_storage_events.go @@ -40,9 +40,9 @@ func (m 
*ExtendedStorageEvents) Do(ctx *config.Context) error { return err } - logger.Info("Found %d tzips", len(tzips)) + logger.Info().Msgf("Found %d tzips", len(tzips)) - logger.Info("Execution events...") + logger.Info().Msg("Execution events...") inserted := make([]models.Model, 0) deleted := make([]models.Model, 0) newTransfers := make([]*transfer.Transfer, 0) @@ -52,7 +52,7 @@ func (m *ExtendedStorageEvents) Do(ctx *config.Context) error { if impl.MichelsonExtendedStorageEvent == nil || impl.MichelsonExtendedStorageEvent.Empty() { continue } - logger.Info("%s...", tzips[i].Address) + logger.Info().Msgf("%s...", tzips[i].Address) protocol, err := ctx.Protocols.Get(tzips[i].Network, "", -1) if err != nil { @@ -112,7 +112,7 @@ func (m *ExtendedStorageEvents) Do(ctx *config.Context) error { } if err := parser.Parse(bmd, proto.Hash, &op); err != nil { if errors.Is(err, noderpc.InvalidNodeResponse{}) { - logger.Error(err) + logger.Err(err) continue } return err @@ -138,12 +138,12 @@ func (m *ExtendedStorageEvents) Do(ctx *config.Context) error { } } } - logger.Info("Delete %d transfers", len(deleted)) + logger.Info().Msgf("Delete %d transfers", len(deleted)) if err := ctx.Storage.BulkDelete(deleted); err != nil { return err } - logger.Info("Found %d transfers", len(inserted)) + logger.Info().Msgf("Found %d transfers", len(inserted)) bu := transferParsers.UpdateTokenBalances(newTransfers) for i := range bu { diff --git a/scripts/migration/migrations/fill_tzip.go b/scripts/migration/migrations/fill_tzip.go index c68dc5e6c..976ea0d29 100644 --- a/scripts/migration/migrations/fill_tzip.go +++ b/scripts/migration/migrations/fill_tzip.go @@ -84,7 +84,7 @@ func (m *FillTZIP) Do(ctx *config.Context) error { } } - logger.WithField("new", len(inserts)).WithField("updates", len(updates)).Info("Saving metadata...") + logger.Info().Int("new", len(inserts)).Int("updates", len(updates)).Msg("Saving metadata...") if err := ctx.StorageDB.Save(inserts); err != nil { return err } @@ -134,7 +134,7 @@ func processTzipItem(ctx *config.Context, item repository.Item, inserts, updates case ctx.Storage.IsRecordNotFound(err): *inserts = append(*inserts, &model.DApps[i]) default: - logger.Error(err) + logger.Err(err) return err } } @@ -145,7 +145,7 @@ func processTzipItem(ctx *config.Context, item repository.Item, inserts, updates *inserts = append(*inserts, &model.DApps[i]) } default: - logger.Error(err) + logger.Err(err) return err } diff --git a/scripts/migration/migrations/fix_id.go b/scripts/migration/migrations/fix_id.go index 0ebd706cc..3df862c5a 100644 --- a/scripts/migration/migrations/fix_id.go +++ b/scripts/migration/migrations/fix_id.go @@ -23,7 +23,7 @@ func (m *FixZeroID) Description() string { // Do - migrate function func (m *FixZeroID) Do(ctx *config.Context) error { return ctx.StorageDB.DB.Transaction(func(tx *gorm.DB) error { - logger.Info("setting new ids for token metadata...") + logger.Info().Msg("setting new ids for token metadata...") var id int64 limit := 1000 @@ -47,7 +47,7 @@ func (m *FixZeroID) Do(ctx *config.Context) error { end = len(tokens) < limit } - logger.Info("creating sequence...") + logger.Info().Msg("creating sequence...") return tx.Exec(` CREATE SEQUENCE token_metadata_id_seq; ALTER TABLE token_metadata ALTER COLUMN id SET DEFAULT nextval('token_metadata_id_seq'); diff --git a/scripts/migration/migrations/get_aliases.go b/scripts/migration/migrations/get_aliases.go index 64f3c24a9..5268315e6 100644 --- a/scripts/migration/migrations/get_aliases.go +++ 
b/scripts/migration/migrations/get_aliases.go @@ -28,20 +28,20 @@ func (m *GetAliases) Description() string { // Do - migrate function func (m *GetAliases) Do(ctx *config.Context) error { - logger.Info("Starting get aliases...") + logger.Info().Msg("Starting get aliases...") cfg := ctx.Config.TzKT["mainnet"] timeout := time.Duration(cfg.Timeout) * time.Second api := tzkt.NewTzKT(cfg.URI, timeout) - logger.Info("TzKT API initialized") + logger.Info().Msg("TzKT API initialized") aliases, err := api.GetAliases() if err != nil { - logger.Fatal(err) + return err } - logger.Info("Got %d aliases from tzkt api", len(aliases)) - logger.Info("Saving aliases...") + logger.Info().Msgf("Got %d aliases from tzkt api", len(aliases)) + logger.Info().Msg("Saving aliases...") newModels := make([]models.Model, 0) updated := make([]models.Model, 0) @@ -69,7 +69,6 @@ func (m *GetAliases) Do(ctx *config.Context) error { }, }) default: - logger.Error(err) return err } } diff --git a/scripts/migration/migrations/nft_metadata.go b/scripts/migration/migrations/nft_metadata.go index 7bcc0e5f5..0dab9544f 100644 --- a/scripts/migration/migrations/nft_metadata.go +++ b/scripts/migration/migrations/nft_metadata.go @@ -27,7 +27,7 @@ func (m *NFTMetadata) Description() string { // Do - migrate function func (m *NFTMetadata) Do(ctx *config.Context) error { - logger.Info("Getting all token metadata...") + logger.Info().Msg("Getting all token metadata...") if err := ctx.Storage.(*core.Postgres).DB.AutoMigrate(&tokenmetadata.TokenMetadata{}); err != nil { return err @@ -38,7 +38,7 @@ func (m *NFTMetadata) Do(ctx *config.Context) error { return err } - logger.Info("Found %d metadata with extra fields", len(metadata)) + logger.Info().Msgf("Found %d metadata with extra fields", len(metadata)) updated := make([]models.Model, len(metadata)) diff --git a/scripts/migration/migrations/operation_kind_to_enum.go b/scripts/migration/migrations/operation_kind_to_enum.go index 371f11308..1e6f52db8 100644 --- a/scripts/migration/migrations/operation_kind_to_enum.go +++ b/scripts/migration/migrations/operation_kind_to_enum.go @@ -31,17 +31,17 @@ func (m *OperationKindToEnum) Do(ctx *config.Context) error { return nil } - logger.Info("renaming 'kind' column to 'old_kind'...") + logger.Info().Msg("renaming 'kind' column to 'old_kind'...") if err := migrator.RenameColumn(model, "kind", "old_kind"); err != nil { return err } - logger.Info("creating new 'kind' column...") + logger.Info().Msg("creating new 'kind' column...") if err := migrator.AddColumn(model, "kind"); err != nil { return err } - logger.Info("setting 'kind' column value...") + logger.Info().Msg("setting 'kind' column value...") for _, kind := range []types.OperationKind{ types.OperationKindOrigination, types.OperationKindOriginationNew, types.OperationKindTransaction, } { @@ -49,7 +49,7 @@ func (m *OperationKindToEnum) Do(ctx *config.Context) error { return err } } - logger.Info("removing 'old_kind' column...") + logger.Info().Msg("removing 'old_kind' column...") return migrator.DropColumn(model, "old_kind") }) diff --git a/scripts/migration/migrations/parameter_events.go b/scripts/migration/migrations/parameter_events.go index c3e774802..31c44ceb2 100644 --- a/scripts/migration/migrations/parameter_events.go +++ b/scripts/migration/migrations/parameter_events.go @@ -41,16 +41,16 @@ func (m *ParameterEvents) Do(ctx *config.Context) error { return err } - logger.Info("Found %d tzips", len(tzips)) + logger.Info().Msgf("Found %d tzips", len(tzips)) - logger.Info("Execution 
events...") + logger.Info().Msg("Execution events...") for i := range tzips { for _, event := range tzips[i].Events { for _, impl := range event.Implementations { if impl.MichelsonParameterEvent.Empty() { continue } - logger.Info("%s...", tzips[i].Address) + logger.Info().Msgf("%s...", tzips[i].Address) protocol, err := ctx.Protocols.Get(tzips[i].Network, "", -1) if err != nil { @@ -112,7 +112,7 @@ func (m *ParameterEvents) Do(ctx *config.Context) error { } if err := parser.Parse(nil, proto.Hash, &op); err != nil { if errors.Is(err, noderpc.InvalidNodeResponse{}) { - logger.Error(err) + logger.Err(err) continue } return err @@ -137,12 +137,12 @@ func (m *ParameterEvents) Do(ctx *config.Context) error { } } - logger.Info("Delete %d transfers", len(deleted)) + logger.Info().Msgf("Delete %d transfers", len(deleted)) if err := ctx.Storage.BulkDelete(deleted); err != nil { return err } - logger.Info("Found %d transfers", len(inserted)) + logger.Info().Msgf("Found %d transfers", len(inserted)) bu := transferParser.UpdateTokenBalances(newTransfers) for i := range bu { inserted = append(inserted, bu[i]) diff --git a/scripts/migration/migrations/protocol.go b/scripts/migration/migrations/protocol.go index 2c3033d35..9393dbe6d 100644 --- a/scripts/migration/migrations/protocol.go +++ b/scripts/migration/migrations/protocol.go @@ -48,127 +48,127 @@ func (m *ProtocolField) Do(ctx *config.Context) error { } func (m *ProtocolField) migrateBlocks(tx *gorm.DB, protocols []protocol.Protocol) error { - logger.Info("Migrating blocks...") + logger.Info().Msg("Migrating blocks...") migrator := tx.Migrator() model := new(block.Block) if !migrator.HasColumn(model, "protocol_id") { - logger.Info("Adding `protocol_id` column...") + logger.Info().Msg("Adding `protocol_id` column...") if err := migrator.AddColumn(model, "protocol_id"); err != nil { return err } } if migrator.HasColumn(model, "protocol") { - logger.Info("Setting `protocol_id` value...") + logger.Info().Msg("Setting `protocol_id` value...") for i := range protocols { if err := tx.Model(model).Where("protocol = ?", protocols[i].Hash).Where("network = ?", protocols[i].Network).Update("protocol_id", protocols[i].ID).Error; err != nil { return err } } - logger.Info("Removing `protocol` column...") + logger.Info().Msg("Removing `protocol` column...") return migrator.DropColumn(model, "protocol") } return nil } func (m *ProtocolField) migrateBigMapDiff(tx *gorm.DB, protocols []protocol.Protocol) error { - logger.Info("Migrating bigmapdiff...") + logger.Info().Msg("Migrating bigmapdiff...") migrator := tx.Migrator() model := new(bigmapdiff.BigMapDiff) if !migrator.HasColumn(model, "protocol_id") { - logger.Info("Adding `protocol_id` column...") + logger.Info().Msg("Adding `protocol_id` column...") if err := migrator.AddColumn(model, "protocol_id"); err != nil { return err } } if migrator.HasColumn(model, "protocol") { - logger.Info("Setting `protocol_id` value...") + logger.Info().Msg("Setting `protocol_id` value...") for i := range protocols { if err := tx.Model(model).Where("protocol = ?", protocols[i].Hash).Where("network = ?", protocols[i].Network).Update("protocol_id", protocols[i].ID).Error; err != nil { return err } } - logger.Info("Removing `protocol` column...") + logger.Info().Msg("Removing `protocol` column...") return migrator.DropColumn(model, "protocol") } return nil } func (m *ProtocolField) migrateOperations(tx *gorm.DB, protocols []protocol.Protocol) error { - logger.Info("Migrating operaitons...") + logger.Info().Msg("Migrating 
operaitons...") migrator := tx.Migrator() model := new(operation.Operation) if !migrator.HasColumn(model, "protocol_id") { - logger.Info("Adding `protocol_id` column...") + logger.Info().Msg("Adding `protocol_id` column...") if err := migrator.AddColumn(model, "protocol_id"); err != nil { return err } } if migrator.HasColumn(model, "protocol") { - logger.Info("Setting `protocol_id` value...") + logger.Info().Msg("Setting `protocol_id` value...") for i := range protocols { if err := tx.Model(model).Where("protocol = ?", protocols[i].Hash).Where("network = ?", protocols[i].Network).Update("protocol_id", protocols[i].ID).Error; err != nil { return err } } - logger.Info("Removing `protocol` column...") + logger.Info().Msg("Removing `protocol` column...") return migrator.DropColumn(model, "protocol") } return nil } func (m *ProtocolField) migrateMigrations(tx *gorm.DB, protocols []protocol.Protocol) error { - logger.Info("Migrating migrations...") + logger.Info().Msg("Migrating migrations...") migrator := tx.Migrator() model := new(migration.Migration) if !migrator.HasColumn(model, "protocol_id") { - logger.Info("Adding `protocol_id` column...") + logger.Info().Msg("Adding `protocol_id` column...") if err := migrator.AddColumn(model, "protocol_id"); err != nil { return err } } if migrator.HasColumn(model, "protocol") { - logger.Info("Setting `protocol_id` value...") + logger.Info().Msg("Setting `protocol_id` value...") for i := range protocols { if err := tx.Model(model).Where("protocol = ?", protocols[i].Hash).Where("network = ?", protocols[i].Network).Update("protocol_id", protocols[i].ID).Error; err != nil { return err } } - logger.Info("Removing `protocol` column...") + logger.Info().Msg("Removing `protocol` column...") return migrator.DropColumn(model, "protocol") } if !migrator.HasColumn(model, "prev_protocol_id") { - logger.Info("Adding `prev_protocol_id` column...") + logger.Info().Msg("Adding `prev_protocol_id` column...") if err := migrator.AddColumn(model, "prev_protocol_id"); err != nil { return err } } if migrator.HasColumn(model, "prev_protocol") { - logger.Info("Setting `prev_protocol_id` value...") + logger.Info().Msg("Setting `prev_protocol_id` value...") for i := range protocols { if err := tx.Model(model).Where("prev_protocol = ?", protocols[i].Hash).Where("network = ?", protocols[i].Network).Update("prev_protocol_id", protocols[i].ID).Error; err != nil { return err } } - logger.Info("Removing `prev_protocol` column...") + logger.Info().Msg("Removing `prev_protocol` column...") return migrator.DropColumn(model, "prev_protocol") } diff --git a/scripts/migration/migrations/tags_to int.go b/scripts/migration/migrations/tags_to int.go index 710cb362d..f33b9ab22 100644 --- a/scripts/migration/migrations/tags_to int.go +++ b/scripts/migration/migrations/tags_to int.go @@ -42,7 +42,7 @@ func (m *TagsToInt) Do(ctx *config.Context) error { } func (m *TagsToInt) migrate(tx *gorm.DB, model models.Model) error { - logger.Info("migrating %s...", model.GetIndex()) + logger.Info().Msgf("migrating %s...", model.GetIndex()) type item struct { ID int64 diff --git a/scripts/migration/migrations/token_balance_recalc.go b/scripts/migration/migrations/token_balance_recalc.go index 14a800ec4..756390c68 100644 --- a/scripts/migration/migrations/token_balance_recalc.go +++ b/scripts/migration/migrations/token_balance_recalc.go @@ -59,13 +59,13 @@ func (m *TokenBalanceRecalc) Recalc(ctx *config.Context, network, address string } if !bcd.IsContract(address) { - logger.Errorf("Invalid contract address: 
`%s`", address) + logger.Error().Msgf("Invalid contract address: `%s`", address) return nil } typ := types.NewNetwork(network) - logger.Info("Removing token balance entities....") + logger.Info().Msg("Removing token balance entities....") if err := ctx.Storage.DeleteByContract(typ, []string{models.DocTokenBalances}, address); err != nil { return err } @@ -74,7 +74,7 @@ func (m *TokenBalanceRecalc) Recalc(ctx *config.Context, network, address string if err != nil { return err } - logger.Info("Received %d balances", len(balances)) + logger.Info().Msgf("Received %d balances", len(balances)) updates := make([]models.Model, 0) for _, balance := range balances { @@ -88,7 +88,7 @@ func (m *TokenBalanceRecalc) Recalc(ctx *config.Context, network, address string }) } - logger.Info("Saving...") + logger.Info().Msg("Saving...") return ctx.Storage.Save(updates) } @@ -111,7 +111,7 @@ func (m *TokenBalanceRecalc) RecalcAllContractEvents(ctx *config.Context) error } for _, tzip := range tzips { - logger.Info("Starting %s %s", tzip.Network, tzip.Address) + logger.Info().Msgf("Starting %s %s", tzip.Network, tzip.Address) if err := m.Recalc(ctx, tzip.Network.String(), tzip.Address); err != nil { return err } diff --git a/scripts/migration/migrations/token_metadata_unknown.go b/scripts/migration/migrations/token_metadata_unknown.go index 5abdac519..f7f3617b8 100644 --- a/scripts/migration/migrations/token_metadata_unknown.go +++ b/scripts/migration/migrations/token_metadata_unknown.go @@ -38,7 +38,7 @@ func (m *TokenMetadataUnknown) Do(ctx *config.Context) error { if err != nil { return err } - logger.Info("Found %d unknown metadata", len(metadata)) + logger.Info().Msgf("Found %d unknown metadata", len(metadata)) bar := progressbar.NewOptions(len(metadata), progressbar.OptionSetPredictTime(false), progressbar.OptionClearOnFinish(), progressbar.OptionShowCount()) @@ -66,7 +66,7 @@ func (m *TokenMetadataUnknown) Do(ctx *config.Context) error { remoteMetadata := new(tokens.TokenMetadata) if err := s.Get(link, remoteMetadata); err != nil { if errors.Is(err, tzipStorage.ErrNoIPFSResponse) { - logger.WithField("url", link).WithField("kind", "token_metadata").Warning(err) + logger.Warning().Err(err).Str("url", link).Str("kind", "token_metadata").Msg("") continue } return err diff --git a/scripts/nginx/main.go b/scripts/nginx/main.go index 6527d3c7f..7a405a174 100644 --- a/scripts/nginx/main.go +++ b/scripts/nginx/main.go @@ -12,7 +12,8 @@ import ( func main() { cfg, err := config.LoadDefaultConfig() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } ctx := config.NewContext( @@ -23,12 +24,14 @@ func main() { dapps, err := ctx.DApps.All() if err != nil { - logger.Fatal(err) + logger.Err(err) + return } aliases, err := ctx.TZIP.GetAliases(types.Mainnet) if err != nil { - logger.Fatal(err) + logger.Err(err) + return } outputDir := fmt.Sprintf("%s/nginx", cfg.SharePath) @@ -36,16 +39,19 @@ func main() { env := os.Getenv("BCD_ENV") if env == "" { - logger.Fatal(fmt.Errorf("BCD_ENV env var is empty")) + logger.Err(fmt.Errorf("BCD_ENV env var is empty")) + return } nginxConfigFilename := fmt.Sprintf("%s/default.%s.conf", outputDir, env) if err := makeNginxConfig(dapps, aliases, nginxConfigFilename, ctx.Config.BaseURL); err != nil { - logger.Fatal(err) + logger.Err(err) + return } sitemapFilename := fmt.Sprintf("%s/sitemap.%s.xml", outputDir, env) if err := makeSitemap(dapps, aliases, sitemapFilename, ctx.Config); err != nil { - logger.Fatal(err) + logger.Err(err) + return } } diff --git 
a/scripts/nginx/nginx.go b/scripts/nginx/nginx.go index 8cdaab43a..e047ca1e5 100644 --- a/scripts/nginx/nginx.go +++ b/scripts/nginx/nginx.go @@ -95,10 +95,10 @@ func makeNginxConfig(dapps []dapp.DApp, _ []tzip.TZIP, filepath, baseURL string) defer file.Close() if _, err = file.WriteString(defaultConf); err != nil { - logger.Fatal(err) + return err } - logger.Info("Nginx default config created in %s", filepath) + logger.Info().Msgf("Nginx default config created in %s", filepath) return nil } diff --git a/scripts/nginx/sitemap.go b/scripts/nginx/sitemap.go index 15a19b388..512ebdb5b 100644 --- a/scripts/nginx/sitemap.go +++ b/scripts/nginx/sitemap.go @@ -35,7 +35,7 @@ func makeSitemap(dapps []dapp.DApp, aliases []tzip.TZIP, filepath string, cfg co return err } - logger.Info("Sitemap created in %s", filepath) + logger.Info().Msgf("Sitemap created in %s", filepath) return nil }
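
The hunks above converge on one call shape: printf-style calls such as logger.Info("Found %d tzips", len(tzips)) become event chains like logger.Info().Msgf(...), and bare errors go through logger.Err(err) instead of logger.Error(err). The call sites look like a thin wrapper over a zerolog-style structured logger; the sketch below is a hypothetical minimal wrapper under that assumption, not the repository's actual internal/logger implementation. The sketches that follow use zerolog's global logger directly so that each one stays self-contained.

package logger

import (
	"os"

	"github.com/rs/zerolog"
)

// log is the package-level logger; ConsoleWriter keeps output human readable.
var log = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()

// Info starts an info-level event; callers finish it with Msg or Msgf.
func Info() *zerolog.Event { return log.Info() }

// Warning starts a warning-level event (zerolog itself names the level Warn).
func Warning() *zerolog.Event { return log.Warn() }

// Error starts an error-level event.
func Error() *zerolog.Event { return log.Error() }

// Err reports err at error level in one call, matching the logger.Err(err) call sites.
func Err(err error) {
	log.Err(err).Msg("")
}

With a wrapper like this, the old logger.Info("Rollback to %d block", level) reads logger.Info().Msgf("Rollback to %d block", level), which is the rewrite applied throughout the patch.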
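
Where the old code chained WithField/With before logging, the new call sites attach typed fields to the event itself (.Str, .Int, .Fields) and finish with Msg, often with an empty message because the error field already carries the information. A hedged illustration of both forms, assuming zerolog; the map passed to Fields mirrors the kind of context LogFields() appears to supply at the contract-metadata call site, but its keys are invented here.

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

var log = zerolog.New(os.Stderr).With().Timestamp().Logger()

func main() {
	err := errors.New("no response from IPFS node")

	// Typed fields replace WithField chains; Err attaches the error itself,
	// and an empty message is fine when the fields say everything.
	log.Warn().
		Str("url", "ipfs://Qm...").
		Str("kind", "token_metadata").
		Err(err).
		Msg("")

	// Fields attaches a whole map of context at once, the shape used at the
	// contract-metadata call site; these keys are invented for the example.
	log.Warn().
		Fields(map[string]interface{}{
			"network":  "mainnet",
			"contract": "KT1...",
			"ptr":      int64(17),
		}).
		Str("kind", "contract_metadata").
		Err(err).
		Msg("")
}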
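
In the script entry points the patch replaces logger.Fatal(err) with logger.Err(err) followed by return. Fatal-level logging in zerolog-style libraries exits the process immediately via os.Exit(1), which would skip deferred cleanup such as the defer ctx.Close() visible in scripts/migration/main.go; logging at error level and returning lets the defers run. A minimal sketch of the pattern; loadConfig and newContext are placeholders, not the project's config API.

package main

import (
	"errors"

	"github.com/rs/zerolog/log"
)

// appContext stands in for the project's config.Context.
type appContext struct{}

func (c *appContext) Close() {}

// loadConfig is a placeholder that always fails, to exercise the error path.
func loadConfig() (string, error) { return "", errors.New("config not found") }

func newContext(cfg string) *appContext { return &appContext{} }

func main() {
	cfg, err := loadConfig()
	if err != nil {
		// A fatal-level log would call os.Exit(1) and skip every defer below;
		// logging at error level and returning keeps shutdown paths intact.
		log.Err(err).Msg("")
		return
	}

	ctx := newContext(cfg)
	defer ctx.Close()

	// ... run the script ...
}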
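
internal/parsers/operations/transaction.go keeps its error classification via errors.Is(err, noderpc.InvalidNodeResponse{}) and only changes how the recoverable case is reported: the error is attached to a warn-level event instead of being formatted into the message. A reduced sketch of that control flow; invalidNodeResponse is a stand-in type, and its Is method is just one way to make the type-based match work, the real noderpc type may do this differently.

package main

import (
	"errors"
	"fmt"

	"github.com/rs/zerolog/log"
)

// invalidNodeResponse stands in for noderpc.InvalidNodeResponse.
type invalidNodeResponse struct{ code int }

func (e invalidNodeResponse) Error() string { return fmt.Sprintf("invalid node response: %d", e.code) }

// Is lets errors.Is match on the type regardless of the code field.
func (e invalidNodeResponse) Is(target error) bool {
	_, ok := target.(invalidNodeResponse)
	return ok
}

// fetchTransfers simulates a recoverable node failure.
func fetchTransfers() error { return invalidNodeResponse{code: 500} }

// parse mirrors the transaction parser's handling: a recoverable node error is
// logged at warn level and swallowed, anything else aborts the parse.
func parse() error {
	if err := fetchTransfers(); err != nil {
		if !errors.Is(err, invalidNodeResponse{}) {
			return err
		}
		log.Warn().Err(err).Msg("")
	}
	return nil
}

func main() {
	if err := parse(); err != nil {
		log.Err(err).Msg("")
	}
}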
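
The compareOperations/compareBigMapDiff test helpers keep their shape: each field mismatch is logged with Info().Msgf and the helper returns false, so a failing comparison prints which field diverged. A cut-down sketch with a hypothetical three-field struct:

package main

import "github.com/rs/zerolog/log"

// op is a hypothetical cut-down stand-in for operation.Operation.
type op struct {
	Source      string
	Destination string
	Tags        int64
}

// compareOps mirrors the helpers' style: log the differing field, return false.
func compareOps(one, two op) bool {
	if one.Source != two.Source {
		log.Info().Msgf("Source: %s != %s", one.Source, two.Source)
		return false
	}
	if one.Destination != two.Destination {
		log.Info().Msgf("Destination: %s != %s", one.Destination, two.Destination)
		return false
	}
	if one.Tags != two.Tags {
		log.Info().Msgf("Tags: %d != %d", one.Tags, two.Tags)
		return false
	}
	return true
}

func main() {
	a := op{Source: "tz1a...", Destination: "KT1b...", Tags: 4}
	b := op{Source: "tz1a...", Destination: "KT1c...", Tags: 4}
	log.Info().Msgf("equal: %v", compareOps(a, b))
}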
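
internal/parsers/tzip/tokens/metadata.go tolerates a decimals value arriving as a JSON number or as a numeric string, and logs unusable values at error level instead of failing the whole decode. A reduced sketch of that tolerant UnmarshalJSON; the struct here is a hypothetical cut-down of the parser's TokenMetadata.

package main

import (
	"encoding/json"
	"strconv"

	"github.com/rs/zerolog/log"
)

// tokenMetadata is a hypothetical cut-down of the parser's TokenMetadata.
type tokenMetadata struct {
	Decimals *int64
}

// UnmarshalJSON accepts "decimals" as a JSON number or a numeric string and
// logs unusable values instead of aborting the whole document.
func (m *tokenMetadata) UnmarshalJSON(data []byte) error {
	var res map[string]interface{}
	if err := json.Unmarshal(data, &res); err != nil {
		return err
	}
	if raw, ok := res["decimals"]; ok {
		switch val := raw.(type) {
		case float64: // encoding/json decodes every JSON number as float64
			d := int64(val)
			m.Decimals = &d
		case string:
			d, err := strconv.ParseInt(val, 10, 64)
			if err != nil {
				log.Error().Msgf("decimals unmarshal error with string: got %#v %T", raw, val)
			} else {
				m.Decimals = &d
			}
		default:
			log.Error().Msgf("decimals unmarshal error: wanted float64 or string, got %#v %T", raw, val)
		}
	}
	return nil
}

func main() {
	var m tokenMetadata
	if err := json.Unmarshal([]byte(`{"decimals":"6"}`), &m); err != nil {
		log.Err(err).Msg("")
		return
	}
	if m.Decimals != nil {
		log.Info().Msgf("decimals: %d", *m.Decimals)
	}
}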
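
WaitNew in internal/postgres/core/postgres.go polls until the database accepts connections and logs a warning between attempts. A stand-alone version of that wait loop using database/sql and an assumed postgres driver instead of the project's gorm-based New:

package main

import (
	"database/sql"
	"time"

	_ "github.com/lib/pq" // assumed driver; any postgres driver works here
	"github.com/rs/zerolog/log"
)

// waitNew blocks until a postgres connection can be opened and pinged,
// logging a warning between attempts.
func waitNew(dsn string, timeout int) *sql.DB {
	var db *sql.DB
	for {
		conn, err := sql.Open("postgres", dsn)
		if err == nil {
			if err = conn.Ping(); err == nil {
				db = conn
				break
			}
		}
		log.Warn().Msgf("Waiting postgres up %d seconds...", timeout)
		time.Sleep(time.Second * time.Duration(timeout))
	}
	return db
}

func main() {
	db := waitNew("host=127.0.0.1 user=bcd dbname=bcd sslmode=disable", 5) // illustrative DSN
	defer db.Close()
	log.Info().Msg("postgres is up")
}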
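
The TzKT clients in internal/tzkt keep their bounded retry loop and now report each failed attempt with Warning().Msgf. The same shape with a plain http.Get standing in for the client (the endpoint and retry count are illustrative):

package main

import (
	"net/http"

	"github.com/rs/zerolog/log"
)

// get retries a request a fixed number of times, logging each failed attempt
// at warn level before giving up and returning the last error.
func get(url string, retryCount int) (*http.Response, error) {
	var (
		resp *http.Response
		err  error
	)
	for count := 0; count < retryCount; count++ {
		if resp, err = http.Get(url); err != nil {
			log.Warn().Msgf("Attempt #%d: %s", count+1, err.Error())
			continue
		}
		break
	}
	return resp, err
}

func main() {
	resp, err := get("https://api.tzkt.io/v1/head", 3)
	if err != nil {
		log.Err(err).Msg("")
		return
	}
	defer resp.Body.Close()
	log.Info().Msgf("status: %s", resp.Status)
}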
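
scripts/migration/main.go times the chosen migration and reports progress and duration through Msgf. A tiny sketch of that bookkeeping; the migration key and the simulated work are placeholders:

package main

import (
	"time"

	"github.com/rs/zerolog/log"
)

func main() {
	start := time.Now()

	key := "create_transfers" // placeholder migration key
	log.Info().Msgf("Starting %v migration...", key)

	// Simulated work; the real migrations report counts they find along the way.
	time.Sleep(150 * time.Millisecond)
	log.Info().Msgf("Found %d operations with transfer entrypoint", 42)

	log.Info().Msgf("%s migration done. Spent: %v", key, time.Since(start))
}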
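
The enum migrations (big_map_action_to_enum.go, operation_kind_to_enum.go) and the protocol migration all follow one transactional flow: rename the old column, add the new one, backfill it per value, drop the old column, logging each phase with Info().Msg. A hedged sketch of that flow against a hypothetical table; sqlite is used only to keep the example self-contained, the project itself runs on postgres.

package main

import (
	"github.com/rs/zerolog/log"
	"gorm.io/driver/sqlite" // illustrative; the project runs on postgres
	"gorm.io/gorm"
)

// bigMapAction is a hypothetical model whose string column becomes a small-int enum.
type bigMapAction struct {
	ID        int64
	Action    int16  // new enum column
	OldAction string // legacy string column after the rename
}

var actionValues = map[string]int16{"alloc": 1, "copy": 2, "remove": 3, "update": 4}

func migrate(db *gorm.DB) error {
	return db.Transaction(func(tx *gorm.DB) error {
		migrator := tx.Migrator()
		model := new(bigMapAction)

		log.Info().Msg("renaming 'action' column to 'old_action'...")
		if err := migrator.RenameColumn(model, "action", "old_action"); err != nil {
			return err
		}

		log.Info().Msg("creating new 'action' column...")
		if err := migrator.AddColumn(model, "action"); err != nil {
			return err
		}

		log.Info().Msg("setting 'action' column value...")
		for name, value := range actionValues {
			if err := tx.Model(model).Where("old_action = ?", name).Update("action", value).Error; err != nil {
				return err
			}
		}

		log.Info().Msg("removing 'old_action' column...")
		return migrator.DropColumn(model, "old_action")
	})
}

func main() {
	db, err := gorm.Open(sqlite.Open("file::memory:?cache=shared"), &gorm.Config{})
	if err != nil {
		log.Err(err).Msg("")
		return
	}
	// Seed a legacy-shaped table with a string action column.
	if err := db.Exec(`CREATE TABLE big_map_actions (id integer, action text)`).Error; err != nil {
		log.Err(err).Msg("")
		return
	}
	if err := migrate(db); err != nil {
		log.Err(err).Msg("")
	}
}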
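
scripts/bcdctl keeps its interactive confirmation and routes the prompt through Warning().Msg and Warning().Msgf. A small sketch of askQuestion and yes as they appear in the hunks, with illustrative wiring around them:

package main

import (
	"bufio"
	"os"
	"strings"

	"github.com/rs/zerolog/log"
)

// askQuestion prints the question at warn level and reads one line from stdin.
func askQuestion(question string) (string, error) {
	log.Warn().Msg(question)
	reader := bufio.NewReader(os.Stdin)
	text, err := reader.ReadString('\n')
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(text), nil
}

// yes returns true when the operator typed "yes".
func yes() bool {
	answer, err := askQuestion("yes or no?")
	if err != nil {
		log.Err(err).Msg("")
		return false
	}
	return answer == "yes"
}

func main() {
	// Network and levels are illustrative values, not project data.
	log.Warn().Msgf("Do you want to rollback '%s' from %d to %d? (yes - continue. no - cancel)", "mainnet", 1500000, 1499990)
	if !yes() {
		log.Info().Msg("Cancelled")
		return
	}
	log.Info().Msg("Done")
}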