diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index ff7c7e97f3..f8ceec8838 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -63,9 +63,10 @@ type SimulatedBackend struct {
 	database   ethdb.Database   // In memory database to store our testing data
 	blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
 
-	mu           sync.Mutex
-	pendingBlock *types.Block   // Currently pending block that will be imported on request
-	pendingState *state.StateDB // Currently pending state that will be the active on request
+	mu              sync.Mutex
+	pendingBlock    *types.Block   // Currently pending block that will be imported on request
+	pendingState    *state.StateDB // Currently pending state that will be the active on request
+	pendingReceipts types.Receipts // Currently receipts for the pending block
 
 	events *filters.EventSystem // Event system for filtering log events live
 
@@ -84,8 +85,8 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
 		database:   database,
 		blockchain: blockchain,
 		config:     genesis.Config,
-		events:     filters.NewEventSystem(&filterBackend{database, blockchain}, false),
 	}
+	backend.events = filters.NewEventSystem(&filterBackend{database, blockchain, backend}, false)
 	backend.rollback(blockchain.CurrentBlock())
 	return backend
 }
@@ -662,7 +663,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 		return fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)
 	}
 	// Include tx in chain
-	blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
+	blocks, receipts := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
 		for _, tx := range b.pendingBlock.Transactions() {
 			block.AddTxWithChain(b.blockchain, tx)
 		}
@@ -672,6 +673,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
 
 	b.pendingBlock = blocks[0]
 	b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil)
+	b.pendingReceipts = receipts[0]
 	return nil
 }
 
@@ -683,7 +685,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
 	var filter *filters.Filter
 	if query.BlockHash != nil {
 		// Block filter requested, construct a single-shot filter
-		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain}, *query.BlockHash, query.Addresses, query.Topics)
+		filter = filters.NewBlockFilter(&filterBackend{b.database, b.blockchain, b}, *query.BlockHash, query.Addresses, query.Topics)
 	} else {
 		// Initialize unset filter boundaries to run from genesis to chain head
 		from := int64(0)
@@ -695,7 +697,7 @@ func (b *SimulatedBackend) FilterLogs(ctx context.Context, query ethereum.Filter
 			to = query.ToBlock.Int64()
 		}
 		// Construct the range filter
-		filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain}, from, to, query.Addresses, query.Topics, false)
+		filter = filters.NewRangeFilter(&filterBackend{b.database, b.blockchain, b}, from, to, query.Addresses, query.Topics, false)
 	}
 	// Run the filter and return all the logs
 	logs, err := filter.Logs(ctx)
@@ -816,8 +818,9 @@ func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList }
 // filterBackend implements filters.Backend to support filtering for logs without
 // taking bloom-bits acceleration structures into account.
 type filterBackend struct {
-	db ethdb.Database
-	bc *core.BlockChain
+	db      ethdb.Database
+	bc      *core.BlockChain
+	backend *SimulatedBackend
 }
 
 func (fb *filterBackend) ChainDb() ethdb.Database { return fb.db }
@@ -834,6 +837,10 @@ func (fb *filterBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*t
 	return fb.bc.GetHeaderByHash(hash), nil
 }
 
+func (fb *filterBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+	return fb.backend.pendingBlock, fb.backend.pendingReceipts
+}
+
 func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
 	number := rawdb.ReadHeaderNumber(fb.db, hash)
 	if number == nil {
diff --git a/accounts/url_test.go b/accounts/url_test.go
index bd6f35fa2a..239aa06d22 100644
--- a/accounts/url_test.go
+++ b/accounts/url_test.go
@@ -32,9 +32,10 @@ func TestURLParsing(t *testing.T) {
 		t.Errorf("expected: %v, got: %v", "ethereum.org", url.Path)
 	}
 
-	_, err = parseURL("ethereum.org")
-	if err == nil {
-		t.Error("expected err, got: nil")
+	for _, u := range []string{"ethereum.org", ""} {
+		if _, err = parseURL(u); err == nil {
+			t.Errorf("input %v, expected err, got: nil", u)
+		}
 	}
 }
 
diff --git a/cmd/ethkey/message.go b/cmd/ethkey/message.go
index 69c8cf0923..fbd80d6e6d 100644
--- a/cmd/ethkey/message.go
+++ b/cmd/ethkey/message.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"io/ioutil"
 
+	"github.com/ethereum/go-ethereum/accounts"
 	"github.com/ethereum/go-ethereum/accounts/keystore"
 	"github.com/ethereum/go-ethereum/cmd/utils"
 	"github.com/ethereum/go-ethereum/common"
@@ -68,7 +69,7 @@ To sign a message contained in a file, use the --msgfile flag.
 		utils.Fatalf("Error decrypting key: %v", err)
 	}
 
-	signature, err := crypto.Sign(signHash(message), key.PrivateKey)
+	signature, err := crypto.Sign(accounts.TextHash(message), key.PrivateKey)
 	if err != nil {
 		utils.Fatalf("Failed to sign message: %v", err)
 	}
@@ -113,7 +114,7 @@ It is possible to refer to a file containing the message.`,
 		utils.Fatalf("Signature encoding is not hexadecimal: %v", err)
 	}
 
-	recoveredPubkey, err := crypto.SigToPub(signHash(message), signature)
+	recoveredPubkey, err := crypto.SigToPub(accounts.TextHash(message), signature)
 	if err != nil || recoveredPubkey == nil {
 		utils.Fatalf("Signature verification failed: %v", err)
 	}
diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go
index 70baae92f4..f5e87f922e 100644
--- a/cmd/ethkey/utils.go
+++ b/cmd/ethkey/utils.go
@@ -23,7 +23,6 @@ import (
 	"strings"
 
 	"github.com/ethereum/go-ethereum/cmd/utils"
-	"github.com/ethereum/go-ethereum/crypto"
 	"gopkg.in/urfave/cli.v1"
 )
 
@@ -46,18 +45,6 @@ func getPassphrase(ctx *cli.Context, confirmation bool) string {
 	return utils.GetPassPhrase("", confirmation)
 }
 
-// signHash is a helper function that calculates a hash for the given message
-// that can be safely used to calculate a signature from.
-//
-// The hash is calculated as
-//   keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
-//
-// This gives context to the signed message and prevents signing of transactions.
-func signHash(data []byte) []byte {
-	msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), data)
-	return crypto.Keccak256([]byte(msg))
-}
-
 // mustPrintJSON prints the JSON encoding of the given object and
 // exits the program with an error message when the marshaling fails.
 func mustPrintJSON(jsonObject interface{}) {
diff --git a/common/format.go b/common/format.go
index 6fc21af719..7af41f52d5 100644
--- a/common/format.go
+++ b/common/format.go
@@ -27,12 +27,12 @@ import (
 // the unnecessary precision off from the formatted textual representation.
 type PrettyDuration time.Duration
 
-var prettyDurationRe = regexp.MustCompile(`\.[0-9]+`)
+var prettyDurationRe = regexp.MustCompile(`\.[0-9]{4,}`)
 
 // String implements the Stringer interface, allowing pretty printing of duration
 // values rounded to three decimals.
 func (d PrettyDuration) String() string {
-	label := fmt.Sprintf("%v", time.Duration(d))
+	label := time.Duration(d).String()
 	if match := prettyDurationRe.FindString(label); len(match) > 4 {
 		label = strings.Replace(label, match, match[:4], 1)
 	}
diff --git a/core/blockchain.go b/core/blockchain.go
index 832eba973b..572b80bab1 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2270,8 +2270,8 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		oldChain    types.Blocks
 		commonBlock *types.Block
 
-		deletedTxs types.Transactions
-		addedTxs   types.Transactions
+		deletedTxs []common.Hash
+		addedTxs   []common.Hash
 
 		deletedLogs [][]*types.Log
 		rebirthLogs [][]*types.Log
@@ -2281,7 +2281,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		// Old chain is longer, gather all transactions and logs as deleted ones
 		for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) {
 			oldChain = append(oldChain, oldBlock)
-			deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+			for _, tx := range oldBlock.Transactions() {
+				deletedTxs = append(deletedTxs, tx.Hash())
+			}
 
 			// Collect deleted logs for notification
 			logs := bc.collectLogs(oldBlock.Hash(), true)
@@ -2311,7 +2313,9 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		}
 		// Remove an old block as well as stash away a new block
 		oldChain = append(oldChain, oldBlock)
-		deletedTxs = append(deletedTxs, oldBlock.Transactions()...)
+		for _, tx := range oldBlock.Transactions() {
+			deletedTxs = append(deletedTxs, tx.Hash())
+		}
 
 		// Collect deleted logs for notification
 		logs := bc.collectLogs(oldBlock.Hash(), true)
@@ -2330,6 +2334,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 			return fmt.Errorf("invalid new chain")
 		}
 	}
+
 	// Ensure the user sees large reorgs
 	if len(oldChain) > 0 && len(newChain) > 0 {
 		logFn := log.Info
@@ -2346,7 +2351,7 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 	} else if len(newChain) > 0 {
 		// Special case happens in the post merge stage that current head is
 		// the ancestor of new head while these two blocks are not consecutive
-		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].NumberU64(), "hash", newChain[0].Hash())
+		log.Info("Extend chain", "add", len(newChain), "number", newChain[0].Number(), "hash", newChain[0].Hash())
 		blockReorgAddMeter.Mark(int64(len(newChain)))
 	} else {
 		// len(newChain) == 0 && len(oldChain) > 0
@@ -2359,19 +2364,17 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 		// Insert the block in the canonical way, re-writing history
 		bc.writeHeadBlock(newChain[i])
 
-		// Collect reborn logs due to chain reorg
-		logs := bc.collectLogs(newChain[i].Hash(), false)
-		if len(logs) > 0 {
-			rebirthLogs = append(rebirthLogs, logs)
-		}
 		// Collect the new added transactions.
-		addedTxs = append(addedTxs, newChain[i].Transactions()...)
+		for _, tx := range newChain[i].Transactions() {
+			addedTxs = append(addedTxs, tx.Hash())
+		}
 	}
+
 	// Delete useless indexes right now which includes the non-canonical
 	// transaction indexes, canonical chain indexes which above the head.
 	indexesBatch := bc.db.NewBatch()
-	for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
-		rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
+	for _, tx := range types.HashDifference(deletedTxs, addedTxs) {
+		rawdb.DeleteTxLookupEntry(indexesBatch, tx)
 	}
 	// Delete any canonical number assignments above the new head
 	number := bc.CurrentBlock().NumberU64()
@@ -2385,6 +2388,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
 	if err := indexesBatch.Write(); err != nil {
 		log.Crit("Failed to delete useless indexes", "err", err)
 	}
+
+	// Collect the logs
+	for i := len(newChain) - 1; i >= 1; i-- {
+		// Collect reborn logs due to chain reorg
+		logs := bc.collectLogs(newChain[i].Hash(), false)
+		if len(logs) > 0 {
+			rebirthLogs = append(rebirthLogs, logs)
+		}
+	}
 	// If any logs need to be fired, do it now. In theory we could avoid creating
 	// this goroutine if there are no events to fire, but realistcally that only
 	// ever happens if we're reorging empty blocks, which will only happen on idle
diff --git a/core/types/block.go b/core/types/block.go
index a63666a76d..f70978e9b2 100644
--- a/core/types/block.go
+++ b/core/types/block.go
@@ -202,10 +202,6 @@ type Block struct {
 	hash atomic.Value
 	size atomic.Value
 
-	// Td is used by package core to store the total difficulty
-	// of the chain up to and including the block.
-	td *big.Int
-
 	// These fields are used by package eth to track
 	// inter-peer block relay.
 	ReceivedAt time.Time
@@ -227,7 +223,7 @@ type extblock struct {
 // are ignored and set to values derived from the given txs, uncles
 // and receipts.
 func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block {
-	b := &Block{header: CopyHeader(header), td: new(big.Int)}
+	b := &Block{header: CopyHeader(header)}
 
 	// TODO: panic if len(txs) != len(receipts)
 	if len(txs) == 0 {
diff --git a/core/types/transaction.go b/core/types/transaction.go
index 1e6c45cd7f..e828c8dcb3 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -449,6 +449,24 @@ func TxDifference(a, b Transactions) Transactions {
 	return keep
 }
 
+// HashDifference returns a new set which is the difference between a and b.
+func HashDifference(a, b []common.Hash) []common.Hash {
+	keep := make([]common.Hash, 0, len(a))
+
+	remove := make(map[common.Hash]struct{})
+	for _, hash := range b {
+		remove[hash] = struct{}{}
+	}
+
+	for _, hash := range a {
+		if _, ok := remove[hash]; !ok {
+			keep = append(keep, hash)
+		}
+	}
+
+	return keep
+}
+
 // TxByNonce implements the sort interface to allow sorting a list of transactions
 // by their nonces. This is usually only useful for sorting transactions from a
 // single account, otherwise a nonce comparison doesn't make much sense.
diff --git a/core/vm/analysis.go b/core/vm/analysis.go
index 3733bab6a7..4aa8cfe70f 100644
--- a/core/vm/analysis.go
+++ b/core/vm/analysis.go
@@ -76,7 +76,7 @@ func codeBitmapInternal(code, bits bitvec) bitvec {
 	for pc := uint64(0); pc < uint64(len(code)); {
 		op := OpCode(code[pc])
 		pc++
-		if op < PUSH1 || op > PUSH32 {
+		if int8(op) < int8(PUSH1) { // If not PUSH (the int8(op) > int(PUSH32) is always false).
 			continue
 		}
 		numbits := op - PUSH1 + 1
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index e3ccf7d892..a354eb9761 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -39,6 +39,7 @@ type Backend interface {
 	HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
 	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
 	GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
+	PendingBlockAndReceipts() (*types.Block, types.Receipts)
 
 	SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
 	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
@@ -134,18 +135,27 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
 		}
 		return f.blockLogs(ctx, header)
 	}
+	// Short-cut if all we care about is pending logs
+	if f.begin == rpc.PendingBlockNumber.Int64() {
+		if f.end != rpc.PendingBlockNumber.Int64() {
+			return nil, errors.New("invalid block range")
+		}
+		return f.pendingLogs()
+	}
 	// Figure out the limits of the filter range
 	header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
 	if header == nil {
 		return nil, nil
 	}
-	head := header.Number.Uint64()
-
-	if f.begin == -1 {
+	var (
+		head    = header.Number.Uint64()
+		end     = uint64(f.end)
+		pending = f.end == rpc.PendingBlockNumber.Int64()
+	)
+	if f.begin == rpc.LatestBlockNumber.Int64() {
 		f.begin = int64(head)
 	}
-	end := uint64(f.end)
-	if f.end == -1 {
+	if f.end == rpc.LatestBlockNumber.Int64() || f.end == rpc.PendingBlockNumber.Int64() {
 		end = head
 	}
 	if f.rangeLimit && (int64(end)-f.begin) > maxFilterBlockRange {
@@ -153,10 +163,10 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
 	}
 	// Gather all indexed logs, and finish with non indexed ones
 	var (
-		logs []*types.Log
-		err  error
+		logs           []*types.Log
+		err            error
+		size, sections = f.backend.BloomStatus()
 	)
-	size, sections := f.backend.BloomStatus()
 	if indexed := sections * size; indexed > uint64(f.begin) {
 		if indexed > end {
 			logs, err = f.indexedLogs(ctx, end)
@@ -169,6 +179,13 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
 	}
 	rest, err := f.unindexedLogs(ctx, end)
 	logs = append(logs, rest...)
+	if pending {
+		pendingLogs, err := f.pendingLogs()
+		if err != nil {
+			return nil, err
+		}
+		logs = append(logs, pendingLogs...)
+	}
 	return logs, err
 }
 
@@ -281,6 +298,19 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs [
 	return nil, nil
 }
 
+// pendingLogs returns the logs matching the filter criteria within the pending block.
+func (f *Filter) pendingLogs() ([]*types.Log, error) {
+	block, receipts := f.backend.PendingBlockAndReceipts()
+	if bloomFilter(block.Bloom(), f.addresses, f.topics) {
+		var unfiltered []*types.Log
+		for _, r := range receipts {
+			unfiltered = append(unfiltered, r.Logs...)
+		}
+		return filterLogs(unfiltered, nil, nil, f.addresses, f.topics), nil
+	}
+	return nil, nil
+}
+
 func includes(addresses []common.Address, a common.Address) bool {
 	for _, addr := range addresses {
 		if addr == a {
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 220513405e..7435a19e86 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -106,6 +106,10 @@ func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types
 	return logs, nil
 }
 
+func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) {
+	return nil, nil
+}
+
 func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
 	return b.txFeed.Subscribe(ch)
 }
@@ -592,7 +596,7 @@ func TestPendingLogsSubscription(t *testing.T) {
 	// (some) events are posted.
 	for i := range testCases {
 		testCases[i].c = make(chan []*types.Log)
-		testCases[i].err = make(chan error)
+		testCases[i].err = make(chan error, 1)
 
 		var err error
 		testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 99e7ccce69..889370440e 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -67,6 +67,7 @@ type Backend interface {
 	BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error)
 	StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error)
 	StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error)
+	PendingBlockAndReceipts() (*types.Block, types.Receipts)
 	GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error)
 	GetTd(ctx context.Context, hash common.Hash) *big.Int
 	GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error)
diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go
index e36912f010..977fa85a06 100644
--- a/p2p/discover/v4_udp_test.go
+++ b/p2p/discover/v4_udp_test.go
@@ -284,6 +284,7 @@ func TestUDPv4_findnode(t *testing.T) {
 	test.waitPacketOut(func(p *v4wire.Neighbors, to *net.UDPAddr, hash []byte) {
 		if len(p.Nodes) != len(want) {
 			t.Errorf("wrong number of results: got %d, want %d", len(p.Nodes), bucketSize)
+			return
 		}
 		for i, n := range p.Nodes {
 			if n.ID.ID() != want[i].ID() {
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index 0bfd4f9eeb..59a69b3659 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -208,7 +208,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType
 		req = &SignDataRequest{ContentType: mediaType, Rawdata: parliaRlp, Messages: messages, Hash: sighash}
 	default: // also case TextPlain.Mime:
 		// Calculates an Ethereum ECDSA signature for:
-		// hash = keccak256("\x19${byteVersion}Ethereum Signed Message:\n${message length}${message}")
+		// hash = keccak256("\x19Ethereum Signed Message:\n${message length}${message}")
 		// We expect it to be a string
 		if stringData, ok := data.(string); !ok {
 			return nil, useEthereumV, fmt.Errorf("input for text/plain must be an hex-encoded string")
@@ -233,7 +233,7 @@ func (api *SignerAPI) determineSignatureFormat(ctx context.Context, contentType
 	return req, useEthereumV, nil
 }
 
-// SignTextWithValidator signs the given message which can be further recovered
+// SignTextValidator signs the given message which can be further recovered
 // with the given validator.
 // hash = keccak256("\x19\x00"${address}${data}).
 func SignTextValidator(validatorData apitypes.ValidatorData) (hexutil.Bytes, string) {
@@ -320,11 +320,11 @@ func (api *SignerAPI) EcRecover(ctx context.Context, data hexutil.Bytes, sig hex
 	//
 	// Note, this function is compatible with eth_sign and personal_sign. As such it recovers
 	// the address of:
-	// hash = keccak256("\x19${byteVersion}Ethereum Signed Message:\n${message length}${message}")
+	// hash = keccak256("\x19Ethereum Signed Message:\n${message length}${message}")
 	// addr = ecrecover(hash, signature)
 	//
 	// Note, the signature must conform to the secp256k1 curve R, S and V values, where
-	// the V value must be be 27 or 28 for legacy reasons.
+	// the V value must be 27 or 28 for legacy reasons.
 	//
 	// https://github.com/ethereum/go-ethereum/wiki/Management-APIs#personal_ecRecover
 	if len(sig) != 65 {
diff --git a/trie/committer.go b/trie/committer.go
index 221681f2e2..5d9a27a503 100644
--- a/trie/committer.go
+++ b/trie/committer.go
@@ -193,9 +193,7 @@ func (c *committer) store(n node, db *Database) node {
 	} else if db != nil {
 		// No leaf-callback used, but there's still a database. Do serial
 		// insertion
-		db.lock.Lock()
 		db.insert(common.BytesToHash(hash), size, n)
-		db.lock.Unlock()
 	}
 	return hash
 }
@@ -209,9 +207,7 @@ func (c *committer) commitLoop(db *Database) {
 			n    = item.node
 		)
 		// We are pooling the trie nodes into an intermediate memory cache
-		db.lock.Lock()
 		db.insert(hash, size, n)
-		db.lock.Unlock()
 
 		if c.onleaf != nil {
 			switch n := n.(type) {
diff --git a/trie/database.go b/trie/database.go
index 7504eb2c91..e388506115 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -316,6 +316,9 @@ func (db *Database) DiskDB() ethdb.KeyValueStore {
 // All nodes inserted by this function will be reference tracked
 // and in theory should only used for **trie nodes** insertion.
 func (db *Database) insert(hash common.Hash, size int, node node) {
+	db.lock.Lock()
+	defer db.lock.Unlock()
+
 	// If the node's already cached, skip
 	if _, ok := db.dirties[hash]; ok {
 		return
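
Note (not part of the patch): a minimal, self-contained sketch of how the new types.HashDifference helper from core/types/transaction.go behaves. It assumes a go-ethereum build that already contains this change; the toy hash values and the main package wrapper are illustrative only.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	// Hashes of transactions that lived on the old, now non-canonical chain.
	deleted := []common.Hash{
		common.HexToHash("0x01"),
		common.HexToHash("0x02"),
		common.HexToHash("0x03"),
	}
	// Hashes that were re-included by the new canonical chain.
	added := []common.Hash{
		common.HexToHash("0x02"),
	}

	// Only lookup entries for transactions that did not make it back into the
	// new chain should be dropped, mirroring the reorg loop in core/blockchain.go.
	for _, h := range types.HashDifference(deleted, added) {
		fmt.Println("would delete tx lookup entry for", h.Hex())
	}
}

The reorg path now compares plain hashes instead of whole transaction objects, which is why the blockchain.go hunk switches from types.TxDifference to this helper.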