Commit 8334c7f

debug log
add more debug log

add log in fixup
sunny2022da committed Jul 30, 2024
1 parent f2ff2a4 commit 8334c7f
Showing 3 changed files with 77 additions and 4 deletions.
2 changes: 1 addition & 1 deletion core/parallel_state_processor.go
@@ -489,7 +489,7 @@ func (p *ParallelStateProcessor) toConfirmTxIndex(targetTxIndex int, isStage2 bo
func (p *ParallelStateProcessor) toConfirmTxIndexResult(txResult *ParallelTxResult, isStage2 bool) bool {
txReq := txResult.txReq
if p.hasConflict(txResult, isStage2) {
log.Debug("HasConflict!! block: %d, txIndex: %d\n", txResult.txReq.block.NumberU64(), txResult.txReq.txIndex)
log.Debug(fmt.Sprintf("HasConflict!! block: %d, txIndex: %d\n", txResult.txReq.block.NumberU64(), txResult.txReq.txIndex))
return false
}
if isStage2 { // not its turn
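Context for this one-line fix: go-ethereum's log package takes a message plus key/value context rather than a printf format string, so the removed call would emit the literal "%d" placeholders and treat the two numbers as stray context values. A minimal, self-contained sketch (not from this repository) of the broken call, the fmt.Sprintf wrapper the commit switches to, and the key/value alternative:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	block, txIndex := uint64(40_000_000), 7

	// Broken: log.Debug does not interpret printf verbs, so the "%d"
	// placeholders are printed literally and the numbers become
	// dangling key/value context.
	log.Debug("HasConflict!! block: %d, txIndex: %d", block, txIndex)

	// Fix applied in this commit: pre-format the message with fmt.Sprintf.
	log.Debug(fmt.Sprintf("HasConflict!! block: %d, txIndex: %d", block, txIndex))

	// Idiomatic geth alternative: structured key/value pairs.
	log.Debug("HasConflict", "block", block, "txIndex", txIndex)
}
```

Either working form only produces output when the node runs at debug verbosity or higher.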
30 changes: 30 additions & 0 deletions core/state/state_object.go
@@ -19,6 +19,7 @@ package state
import (
"bytes"
"fmt"
"github.com/ethereum/go-ethereum/log"
"io"
"math/big"
"sync"
@@ -536,6 +537,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
s.storageRecordsLock.Lock()
defer s.storageRecordsLock.Unlock()
}

if true {
log.Debug(fmt.Sprintf("Dav - updating trie before finalize - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))

}
// Make sure all dirty slots are finalized into the pending storage area
s.finalise(false)

@@ -579,6 +586,11 @@ func (s *stateObject) updateTrie() (Trie, error) {
dirtyStorage[key] = v
return true
})
if true {
log.Debug(fmt.Sprintf("Dav - updating trie before cal trie - addr: %s, s.data.root: %s, s.trie.hash: %s Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.data.Root, s.trie.Hash(), s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))

}
var wg sync.WaitGroup
wg.Add(1)
go func() {
@@ -648,6 +660,12 @@ func (s *stateObject) updateTrie() (Trie, error) {
}

s.pendingStorage = newStorage(s.isParallel) // reset pending map

if true {
log.Debug(fmt.Sprintf("Dav - updating trie before exit - addr: %s, s.data.root: %s, s.trie.hash: %s\n",
s.address, s.data.Root, s.trie.Hash()))

}
return tr, nil
/*
s.pendingStorage.Range(func(keyItf, valueItf interface{}) bool {
@@ -990,6 +1008,13 @@ func (s *stateObject) fixUpOriginAndResetPendingStorage() {
if origObj != nil && origObj.originStorage.Length() != 0 {
// There can be racing issue with CopyForSlot/LightCopy
origObj.storageRecordsLock.RLock()
if true {
log.Debug(fmt.Sprintf("\n----vvvvvvvvvv----\nDav - fixUpOriginAndResetPendingStorage before fixup - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\nmainDBObj(Index: %d): Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String(),
mainDB.TxIndex(),
origObj.pendingStorage.String(), origObj.dirtyStorage.String(), origObj.originStorage.String()))

}
originStorage := origObj.originStorage.Copy()
origObj.storageRecordsLock.RUnlock()
// During the tx execution, the originStorage can be updated with GetCommittedState()
@@ -1006,10 +1031,15 @@
})
s.originStorage = originStorage
}

// isParallel is unnecessary since the pendingStorage for slotObject will be used serially from now on.
if s.pendingStorage.Length() > 0 {
s.pendingStorage = newStorage(false)
}
if true {
log.Debug(fmt.Sprintf("Dav - fixUpOriginAndResetPendingStorage after fixup - addr: %s, Stroages:\nPending: %s\nDirty: %s\nOrigin: %s\n----^^^^^^^^^^----\n",
s.address, s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()))
}
s.storageRecordsLock.Unlock()
}
}
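All of the state_object.go additions above follow one shape: an always-on `if true { ... }` guard around a Sprintf-built Debug line that dumps the pending, dirty, and origin storage maps at a named stage of updateTrie or fixUpOriginAndResetPendingStorage. A minimal sketch of how those repeated blocks could be funneled through a single toggleable helper; this is not part of the commit, and the names debugStorageTrace and traceStorages are hypothetical:

```go
package state

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

// debugStorageTrace is a hypothetical package-level switch; setting it to
// false silences every temporary trace without deleting the call sites.
const debugStorageTrace = true

// traceStorages logs the three storage maps of a state object at a named
// stage, mirroring the Debug lines added in updateTrie and
// fixUpOriginAndResetPendingStorage above.
func traceStorages(stage, addr, pending, dirty, origin string) {
	if !debugStorageTrace {
		return
	}
	log.Debug(fmt.Sprintf("Dav - %s - addr: %s, Storages:\nPending: %s\nDirty: %s\nOrigin: %s\n",
		stage, addr, pending, dirty, origin))
}
```

A call site inside updateTrie would then read traceStorages("updating trie before finalize", s.address.String(), s.pendingStorage.String(), s.dirtyStorage.String(), s.originStorage.String()).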
49 changes: 46 additions & 3 deletions core/state/statedb.go
@@ -1538,7 +1538,9 @@ func (s *StateDB) GetRefund() uint64 {
// into the tries just yet. Only IntermediateRoot or Commit will do that.
func (s *StateDB) Finalise(deleteEmptyObjects bool) {
addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties))

if true {
log.Debug(fmt.Sprintf("Dav - Finalise - enter - tx: %d\n", s.txIndex))
}
// finalise stateObjectsDestruct
for addr, acc := range s.stateObjectsDestructDirty {
s.stateObjectsDestruct[addr] = acc
@@ -1615,6 +1617,11 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
}
// Invalidate journal because reverting across transactions is not allowed.
s.clearJournalAndRefund()

if true {
log.Debug(fmt.Sprintf("Dav - Finalise - exit - tx: %d\ns.stateObjectsPending(%d) %v\n", s.txIndex, len(s.stateObjectsPending), s.stateObjectsPending))
//debug.PrintStack()
}
}

// IntermediateRoot computes the current root hash of the state trie.
@@ -1623,9 +1630,22 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) {
// TODO: For parallel SlotDB, IntermediateRootForSlot is used, need to clean up this method.
func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// Finalise all the dirty storage states and write them into the tries
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, before Finalise, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
s.Finalise(deleteEmptyObjects)
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, before AccountsIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
s.AccountsIntermediateRoot()
return s.StateIntermediateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, after AccountsIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
res := s.StateIntermediateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - IntermediateRoot - tx: %d, after StateIntermediateRoot, s.trie.hash: %s\n", s.txIndex, s.trie.Hash()))
}
return res
}

func (s *StateDB) AccountsIntermediateRoot() {
@@ -1657,8 +1677,13 @@ func (s *StateDB) AccountsIntermediateRoot() {
wg.Add(1)
tasks <- func() {
defer wg.Done()
if false {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(P) - tx: %d, before obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
obj.updateRoot()

if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(P) - tx: %d, after obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
// Cache the data until commit. Note, this update mechanism is not symmetric
// to the deletion, because whereas it is enough to track account updates
// at commit time, deletions need tracking at transaction boundary level to
@@ -1673,7 +1698,13 @@
wg.Add(1)
tasks <- func() {
defer wg.Done()
if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(s) - tx: %d, before obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}
obj.updateRoot()
if true {
log.Debug(fmt.Sprintf("Dav - AccountsIntermediateRoot(s) - tx: %d, after obj.updateRoot(), obj: %s, s.data: %v, s.trie.hash: %s\n", s.txIndex, obj.address, obj.data, s.trie.Hash()))
}

// Cache the data until commit. Note, this update mechanism is not symmetric
// to the deletion, because whereas it is enough to track account updates
Expand Down Expand Up @@ -1731,16 +1762,28 @@ func (s *StateDB) StateIntermediateRoot() common.Hash {
if s.parallel.isSlotDB {
if obj := s.parallel.dirtiedStateObjectsInSlot[addr]; obj.deleted {
s.deleteStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(p) - State Processing,deleteStateObj: %s\n", obj.address))
}
s.AccountDeleted += 1
} else {
s.updateStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(p) - State Processing,updateStateObj: %s, obj.data: %v\n", obj.address, obj.data))
}
s.AccountUpdated += 1
}
} else if obj, _ := s.getStateObjectFromStateObjects(addr); obj.deleted {
s.deleteStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(s) - State Processing,deleteStateObj: %s\n", obj.address))
}
s.AccountDeleted += 1
} else {
s.updateStateObject(obj)
if true {
log.Debug(fmt.Sprintf("Dav - StateIntermediateRoot(s, %v)- State Processing,updateStateObj: %s, obj.data: %v\n", s.isParallel, obj.address, obj.data))
}
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure
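For orientation on the AccountsIntermediateRoot hunks: each obj.updateRoot() call is wrapped in a closure that is pushed onto a tasks channel and synchronized with a sync.WaitGroup, which is why the new before/after traces sit inside those closures rather than in a plain loop. A small, self-contained sketch of that dispatch shape, with illustrative names rather than the geth originals:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	tasks := make(chan func(), 16)
	var wg sync.WaitGroup

	// A few workers drain the task channel concurrently.
	for i := 0; i < 4; i++ {
		go func() {
			for task := range tasks {
				task()
			}
		}()
	}

	// Enqueue per-account work; each closure traces before and after its
	// update, mirroring the structure this commit instruments.
	for _, addr := range []string{"0xaaa", "0xbbb", "0xccc"} {
		addr := addr
		wg.Add(1)
		tasks <- func() {
			defer wg.Done()
			fmt.Printf("before updateRoot: %s\n", addr)
			// ... the real code calls obj.updateRoot() here ...
			fmt.Printf("after updateRoot: %s\n", addr)
		}
	}

	wg.Wait()
	close(tasks)
}
```

The per-task wg.Add/wg.Done pairing is what lets the traces interleave across accounts while the caller still blocks until every root update has finished.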
