all: implement new version state history

pull/30107/head
Gary Rong 6 months ago
parent f59d013e40
commit 22d23591cb
  1. cmd/evm/internal/t8ntool/execution.go (4 lines changed)
  2. cmd/evm/runner.go (2 lines changed)
  3. cmd/geth/dbcmd.go (3 lines changed)
  4. core/blockchain.go (2 lines changed)
  5. core/blockchain_test.go (2 lines changed)
  6. core/chain_makers.go (4 lines changed)
  7. core/genesis.go (4 lines changed)
  8. core/state/state_object.go (12 lines changed)
  9. core/state/state_test.go (6 lines changed)
  10. core/state/statedb.go (27 lines changed)
  11. core/state/statedb_fuzz_test.go (2 lines changed)
  12. core/state/statedb_test.go (28 lines changed)
  13. core/state/stateupdate.go (84 lines changed)
  14. core/state/sync_test.go (2 lines changed)
  15. core/txpool/blobpool/blobpool_test.go (12 lines changed)
  16. eth/api_debug_test.go (6 lines changed)
  17. eth/state_accessor.go (2 lines changed)
  18. tests/state_test_util.go (4 lines changed)
  19. trie/triestate/state.go (14 lines changed)
  20. triedb/pathdb/database_test.go (79 lines changed)
  21. triedb/pathdb/disklayer.go (2 lines changed)
  22. triedb/pathdb/execute.go (40 lines changed)
  23. triedb/pathdb/history.go (40 lines changed)
  24. triedb/pathdb/history_inspect.go (8 lines changed)
  25. triedb/pathdb/history_test.go (2 lines changed)
  26. triedb/pathdb/journal.go (12 lines changed)

@ -346,7 +346,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig,
statedb.AddBalance(w.Address, uint256.MustFromBig(amount), tracing.BalanceIncreaseWithdrawal)
}
// Commit block
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber))
root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), chainConfig.IsCancun(vmContext.BlockNumber, vmContext.Time))
if err != nil {
return nil, nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err))
}
@ -392,7 +392,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc) *state.StateDB
}
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false)
root, _ := statedb.Commit(0, false, false)
statedb, _ = state.New(root, sdb, nil)
return statedb
}

@ -272,7 +272,7 @@ func runCmd(ctx *cli.Context) error {
output, leftOverGas, stats, err := timedExec(bench, execFunc)
if ctx.Bool(DumpFlag.Name) {
root, err := statedb.Commit(genesisConfig.Number, true)
root, err := statedb.Commit(genesisConfig.Number, true, false)
if err != nil {
fmt.Printf("Failed to commit changes %v\n", err)
return err

@ -829,8 +829,7 @@ func inspectAccount(db *triedb.Database, start uint64, end uint64, address commo
func inspectStorage(db *triedb.Database, start uint64, end uint64, address common.Address, slot common.Hash, raw bool) error {
// The hash of storage slot key is utilized in the history
// rather than the raw slot key, make the conversion.
slotHash := crypto.Keccak256Hash(slot.Bytes())
stats, err := db.StorageHistory(address, slotHash, start, end)
stats, err := db.StorageHistory(address, slot, start, end)
if err != nil {
return err
}

@ -1474,7 +1474,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
log.Crit("Failed to write block into disk", "err", err)
}
// Commit all cached state changes into underlying memory database.
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()))
root, err := statedb.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.chainConfig.IsCancun(block.Number(), block.Time()))
if err != nil {
return err
}

@ -176,7 +176,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
blockchain.chainmu.MustLock()
rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)))
rawdb.WriteBlock(blockchain.db, block)
statedb.Commit(block.NumberU64(), false)
statedb.Commit(block.NumberU64(), false, false)
blockchain.chainmu.Unlock()
}
return nil

@ -353,7 +353,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse
}
// Write state changes to db
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}
@ -459,7 +459,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
}
// Write state changes to db
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number))
root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), config.IsCancun(b.header.Number, b.header.Time))
if err != nil {
panic(fmt.Sprintf("state write error: %v", err))
}

@ -142,7 +142,7 @@ func hashAlloc(ga *types.GenesisAlloc, isVerkle bool) (common.Hash, error) {
statedb.SetState(addr, key, value)
}
}
return statedb.Commit(0, false)
return statedb.Commit(0, false, false)
}
// flushAlloc is very similar with hash, but the main difference is all the generated
@ -165,7 +165,7 @@ func flushAlloc(ga *types.GenesisAlloc, db ethdb.Database, triedb *triedb.Databa
statedb.SetState(addr, key, value)
}
}
root, err := statedb.Commit(0, false)
root, err := statedb.Commit(0, false, false)
if err != nil {
return err
}

@ -440,10 +440,16 @@ func (s *stateObject) commitStorage(op *accountUpdate) {
op.storages = make(map[common.Hash][]byte)
}
op.storages[hash] = encode(val)
if op.storagesOrigin == nil {
op.storagesOrigin = make(map[common.Hash][]byte)
if op.storagesOriginByKey == nil {
op.storagesOriginByKey = make(map[common.Hash][]byte)
}
if op.storagesOriginByHash == nil {
op.storagesOriginByHash = make(map[common.Hash][]byte)
}
op.storagesOrigin[hash] = encode(s.originStorage[key])
origin := encode(s.originStorage[key])
op.storagesOriginByKey[key] = origin
op.storagesOriginByHash[hash] = origin
// Overwrite the clean value of storage slots
s.originStorage[key] = val
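
For every mutated slot the two origin maps receive the same encoded value; only the identifier differs (the raw key versus its Keccak-256 hash). A minimal standalone sketch of that relationship, with made-up map contents standing in for storagesOriginByKey and storagesOriginByHash:

// Sketch only: demonstrates the keying relationship between the two origin maps.
// The map names mirror the fields populated above; the concrete values are made up.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key := common.HexToHash("0x01")           // raw storage slot key
	hash := crypto.Keccak256Hash(key.Bytes()) // hash of the slot key
	origin := []byte{0xca, 0xfe}              // placeholder for the prefix-zero-trimmed RLP of the prior value

	byKey := map[common.Hash][]byte{key: origin}   // analogue of storagesOriginByKey
	byHash := map[common.Hash][]byte{hash: origin} // analogue of storagesOriginByHash

	// Same value, reachable under either identifier.
	fmt.Println(bytes.Equal(byKey[key], byHash[hash])) // true
}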

@ -59,7 +59,7 @@ func TestDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
root, _ := s.state.Commit(0, false)
root, _ := s.state.Commit(0, false, false)
// check that DumpToCollector contains the state objects that are in trie
s.state, _ = New(root, tdb, nil)
@ -118,7 +118,7 @@ func TestIterativeDump(t *testing.T) {
// write some of them to the trie
s.state.updateStateObject(obj1)
s.state.updateStateObject(obj2)
root, _ := s.state.Commit(0, false)
root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, tdb, nil)
b := &bytes.Buffer{}
@ -144,7 +144,7 @@ func TestNull(t *testing.T) {
var value common.Hash
s.state.SetState(address, common.Hash{}, value)
s.state.Commit(0, false)
s.state.Commit(0, false, false)
if value := s.state.GetState(address, common.Hash{}); value != (common.Hash{}) {
t.Errorf("expected empty current value, got %x", value)

@ -1119,7 +1119,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
// with their values be tracked as original value.
// In case (d), **original** account along with its storages should be deleted,
// with their values be tracked as original value.
func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
func (s *StateDB) handleDestruction(noStorageWiping bool) (map[common.Hash]*accountDelete, []*trienode.NodeSet, error) {
var (
nodes []*trienode.NodeSet
buf = crypto.NewKeccakState()
@ -1148,6 +1148,9 @@ func (s *StateDB) handleDestruction() (map[common.Hash]*accountDelete, []*trieno
if prev.Root == types.EmptyRootHash {
continue
}
if noStorageWiping {
return nil, nil, fmt.Errorf("unexpected storage wiping, %x", addr)
}
// Remove storage slots belonging to the account.
slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
if err != nil {
@ -1168,7 +1171,7 @@ func (s *StateDB) GetTrie() Trie {
// commit gathers the state mutations accumulated along with the associated
// trie changes, resetting all internal flags with the new state as the base.
func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
func (s *StateDB) commit(deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
// Short circuit in case any database failure occurred earlier.
if s.dbErr != nil {
return nil, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr)
@ -1218,7 +1221,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
// the same block, account deletions must be processed first. This ensures
// that the storage trie nodes deleted during destruction and recreated
// during subsequent resurrection can be combined correctly.
deletes, delNodes, err := s.handleDestruction()
deletes, delNodes, err := s.handleDestruction(noStorageWiping)
if err != nil {
return nil, err
}
@ -1310,13 +1313,14 @@ func (s *StateDB) commit(deleteEmptyObjects bool) (*stateUpdate, error) {
origin := s.originalRoot
s.originalRoot = root
return newStateUpdate(origin, root, deletes, updates, nodes), nil
return newStateUpdate(noStorageWiping, origin, root, deletes, updates, nodes), nil
}
// commitAndFlush is a wrapper of commit which also commits the state mutations
// to the configured data stores.
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateUpdate, error) {
ret, err := s.commit(deleteEmptyObjects)
func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (*stateUpdate, error) {
ret, err := s.commit(deleteEmptyObjects, noStorageWiping)
if err != nil {
return nil, err
}
@ -1351,7 +1355,7 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
// If trie database is enabled, commit the state update as a new layer
if db := s.db.TrieDB(); db != nil {
start := time.Now()
set := triestate.New(ret.accountsOrigin, ret.storagesOrigin)
set := triestate.New(ret.accountsOrigin, ret.storagesOrigin, ret.rawStorageKey)
if err := db.Update(ret.root, ret.originRoot, block, ret.nodes, set); err != nil {
return nil, err
}
@ -1370,8 +1374,13 @@ func (s *StateDB) commitAndFlush(block uint64, deleteEmptyObjects bool) (*stateU
//
// The associated block number of the state transition is also provided
// for more chain context.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects)
//
// noStorageWiping is a flag indicating whether storage wiping is permitted.
// Since self-destruction was deprecated with the Cancun fork and there are
// no empty accounts left that could be deleted by EIP-158, storage wiping
// should not occur.
func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, noStorageWiping bool) (common.Hash, error) {
ret, err := s.commitAndFlush(block, deleteEmptyObjects, noStorageWiping)
if err != nil {
return common.Hash{}, err
}
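
The call sites touched by this commit derive the flag from the chain config; a hedged sketch of that convention, assuming the go-ethereum state, params and types packages (commitBlockState is a hypothetical wrapper, not part of the change):

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// Sketch only: storage wiping is disallowed once Cancun is active, since
// self-destruction no longer removes pre-existing accounts after that fork.
func commitBlockState(statedb *state.StateDB, config *params.ChainConfig, header *types.Header) (common.Hash, error) {
	noStorageWiping := config.IsCancun(header.Number, header.Time)
	return statedb.Commit(header.Number.Uint64(), config.IsEIP158(header.Number), noStorageWiping)
}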

@ -236,7 +236,7 @@ func (test *stateTest) run() bool {
} else {
state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary
}
ret, err := state.commitAndFlush(0, true) // call commit at the block boundary
ret, err := state.commitAndFlush(0, true, false) // call commit at the block boundary
if err != nil {
panic(err)
}

@ -119,7 +119,7 @@ func TestIntermediateLeaks(t *testing.T) {
}
// Commit and cross check the databases.
transRoot, err := transState.Commit(0, false)
transRoot, err := transState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit transition state: %v", err)
}
@ -127,7 +127,7 @@ func TestIntermediateLeaks(t *testing.T) {
t.Errorf("can not commit trie %v to persistent database", transRoot.Hex())
}
finalRoot, err := finalState.Commit(0, false)
finalRoot, err := finalState.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit final state: %v", err)
}
@ -240,7 +240,7 @@ func TestCopyWithDirtyJournal(t *testing.T) {
obj.data.Root = common.HexToHash("0xdeadbeef")
orig.updateStateObject(obj)
}
root, _ := orig.Commit(0, true)
root, _ := orig.Commit(0, true, false)
orig, _ = New(root, db, nil)
// modify all in memory without finalizing
@ -291,7 +291,7 @@ func TestCopyObjectState(t *testing.T) {
t.Fatalf("Error in test itself, the 'done' flag should not be set before Commit, have %v want %v", have, want)
}
}
orig.Commit(0, true)
orig.Commit(0, true, false)
for _, op := range cpy.mutations {
if have, want := op.applied, false; have != want {
t.Fatalf("Error: original state affected copy, have %v want %v", have, want)
@ -692,7 +692,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error {
func TestTouchDelete(t *testing.T) {
s := newStateEnv()
s.state.getOrNewStateObject(common.Address{})
root, _ := s.state.Commit(0, false)
root, _ := s.state.Commit(0, false, false)
s.state, _ = New(root, s.state.db, s.state.snaps)
snapshot := s.state.Snapshot()
@ -780,7 +780,7 @@ func TestCopyCommitCopy(t *testing.T) {
t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval)
}
// Commit state, ensure states can be loaded from disk
root, _ := state.Commit(0, false)
root, _ := state.Commit(0, false, false)
state, _ = New(root, tdb, nil)
if balance := state.GetBalance(addr); balance.Cmp(uint256.NewInt(42)) != 0 {
t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42)
@ -894,11 +894,11 @@ func TestCommitCopy(t *testing.T) {
if val := state.GetCommittedState(addr, skey1); val != (common.Hash{}) {
t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{})
}
root, _ := state.Commit(0, true)
root, _ := state.Commit(0, true, false)
state, _ = New(root, db, nil)
state.SetState(addr, skey2, sval2)
state.Commit(1, true)
state.Commit(1, true, false)
// Copy the committed state database, the copied one is not fully functional.
copied := state.Copy()
@ -942,7 +942,7 @@ func TestDeleteCreateRevert(t *testing.T) {
addr := common.BytesToAddress([]byte("so"))
state.SetBalance(addr, uint256.NewInt(1), tracing.BalanceChangeUnspecified)
root, _ := state.Commit(0, false)
root, _ := state.Commit(0, false, false)
state, _ = New(root, state.db, state.snaps)
// Simulate self-destructing in one transaction, then create-reverting in another
@ -954,7 +954,7 @@ func TestDeleteCreateRevert(t *testing.T) {
state.RevertToSnapshot(id)
// Commit the entire state and make sure we don't crash and have the correct state
root, _ = state.Commit(0, true)
root, _ = state.Commit(0, true, false)
state, _ = New(root, state.db, state.snaps)
if state.getStateObject(addr) != nil {
@ -997,7 +997,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
a2 := common.BytesToAddress([]byte("another"))
state.SetBalance(a2, uint256.NewInt(100), tracing.BalanceChangeUnspecified)
state.SetCode(a2, []byte{1, 2, 4})
root, _ = state.Commit(0, false)
root, _ = state.Commit(0, false, false)
t.Logf("root: %x", root)
// force-flush
tdb.Commit(root, false)
@ -1021,7 +1021,7 @@ func testMissingTrieNodes(t *testing.T, scheme string) {
}
// Modify the state
state.SetBalance(addr, uint256.NewInt(2), tracing.BalanceChangeUnspecified)
root, err := state.Commit(0, false)
root, err := state.Commit(0, false, false)
if err == nil {
t.Fatalf("expected error, got root :%x", root)
}
@ -1217,7 +1217,7 @@ func TestFlushOrderDataLoss(t *testing.T) {
state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s})
}
}
root, err := state.Commit(0, false)
root, err := state.Commit(0, false, false)
if err != nil {
t.Fatalf("failed to commit state trie: %v", err)
}
@ -1293,7 +1293,7 @@ func TestDeleteStorage(t *testing.T) {
value := common.Hash(uint256.NewInt(uint64(10 * i)).Bytes32())
state.SetState(addr, slot, value)
}
root, _ := state.Commit(0, true)
root, _ := state.Commit(0, true, false)
// Init phase done, create two states, one with snap and one without
fastState, _ := New(root, db, snaps)
slowState, _ := New(root, db, nil)

@ -30,34 +30,54 @@ type contractCode struct {
// accountDelete represents an operation for deleting an Ethereum account.
type accountDelete struct {
address common.Address // address is the unique account identifier
origin []byte // origin is the original value of account data in slim-RLP encoding.
storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
address common.Address // address is the unique account identifier
origin []byte // origin is the original value of account data in slim-RLP encoding.
// storagesOrigin stores the original values of mutated slots in
// prefix-zero-trimmed RLP format. The map key refers to the **HASH**
// of the raw storage slot key.
storagesOrigin map[common.Hash][]byte
}
// accountUpdate represents an operation for updating an Ethereum account.
type accountUpdate struct {
address common.Address // address is the unique account identifier
data []byte // data is the slim-RLP encoded account data.
origin []byte // origin is the original value of account data in slim-RLP encoding.
code *contractCode // code represents mutated contract code; nil means it's not modified.
storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
storagesOrigin map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in prefix-zero-trimmed RLP format.
address common.Address // address is the unique account identifier
data []byte // data is the slim-RLP encoded account data.
origin []byte // origin is the original value of account data in slim-RLP encoding.
code *contractCode // code represents mutated contract code; nil means it's not modified.
storages map[common.Hash][]byte // storages stores mutated slots in prefix-zero-trimmed RLP format.
// storagesOriginByKey and storagesOriginByHash both store the original values
// of mutated slots in prefix-zero-trimmed RLP format. The difference is that
// storagesOriginByKey uses the **raw** storage slot key as the map ID, while
// storagesOriginByHash uses the **hash** of the storage slot key instead.
storagesOriginByKey map[common.Hash][]byte
storagesOriginByHash map[common.Hash][]byte
}
// stateUpdate represents the difference between two states resulting from state
// execution. It contains information about mutated contract codes, accounts,
// and storage slots, along with their original values.
type stateUpdate struct {
originRoot common.Hash // hash of the state before applying mutation
root common.Hash // hash of the state after applying mutation
destructs map[common.Hash]struct{} // destructs contains the list of destructed accounts
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
storages map[common.Hash]map[common.Hash][]byte // storages stores mutated slots in 'prefix-zero-trimmed' RLP format
storagesOrigin map[common.Address]map[common.Hash][]byte // storagesOrigin stores the original values of mutated slots in 'prefix-zero-trimmed' RLP format
codes map[common.Address]contractCode // codes contains the set of dirty codes
nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
originRoot common.Hash // hash of the state before applying mutation
root common.Hash // hash of the state after applying mutation
destructs map[common.Hash]struct{} // destructs contains the list of destructed accounts
accounts map[common.Hash][]byte // accounts stores mutated accounts in 'slim RLP' encoding
accountsOrigin map[common.Address][]byte // accountsOrigin stores the original values of mutated accounts in 'slim RLP' encoding
// storages stores mutated slots in 'prefix-zero-trimmed' RLP format.
// The value is keyed by account hash and **storage slot key hash**.
storages map[common.Hash]map[common.Hash][]byte
// storagesOrigin stores the original values of mutated slots in
// 'prefix-zero-trimmed' RLP format.
// (a) the value is keyed by account hash and **storage slot key** if rawStorageKey is true;
// (b) the value is keyed by account hash and **storage slot key hash** if rawStorageKey is false;
storagesOrigin map[common.Address]map[common.Hash][]byte
rawStorageKey bool
codes map[common.Address]contractCode // codes contains the set of dirty codes
nodes *trienode.MergedNodeSet // Aggregated dirty nodes caused by state changes
}
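
Any consumer of the struct has to pick the storage identifier according to rawStorageKey; a minimal sketch that would sit alongside the struct above (originOf is a hypothetical helper, assuming the common and crypto packages):

// Sketch only: originOf illustrates how the rawStorageKey flag decides which
// identifier indexes storagesOrigin for a given account and slot.
func originOf(u *stateUpdate, addr common.Address, slotKey common.Hash) []byte {
	id := crypto.Keccak256Hash(slotKey.Bytes()) // flag false: keyed by the slot key hash
	if u.rawStorageKey {
		id = slotKey // flag true: keyed by the raw slot key
	}
	return u.storagesOrigin[addr][id]
}
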
// empty returns a flag indicating the state transition is empty or not.
@ -65,10 +85,13 @@ func (sc *stateUpdate) empty() bool {
return sc.originRoot == sc.root
}
// newStateUpdate constructs a state update object, representing the differences
// between two states by performing state execution. It aggregates the given
// account deletions and account updates to form a comprehensive state update.
func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
// newStateUpdate constructs a state update object by identifying the differences
// between two states through state execution. It combines the specified account
// deletions and account updates to create a complete state update.
//
// rawStorageKey is a flag indicating whether to use the raw storage slot key or
// the hash of the slot key for constructing state update object.
func newStateUpdate(rawStorageKey bool, originRoot common.Hash, root common.Hash, deletes map[common.Hash]*accountDelete, updates map[common.Hash]*accountUpdate, nodes *trienode.MergedNodeSet) *stateUpdate {
var (
destructs = make(map[common.Hash]struct{})
accounts = make(map[common.Hash][]byte)
@ -77,12 +100,14 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
storagesOrigin = make(map[common.Address]map[common.Hash][]byte)
codes = make(map[common.Address]contractCode)
)
// Due to the fact that some accounts could be destructed and resurrected
// within the same block, the deletions must be aggregated first.
// Since some accounts might be destroyed and recreated within the same
// block, deletions must be aggregated first.
for addrHash, op := range deletes {
addr := op.address
destructs[addrHash] = struct{}{}
accountsOrigin[addr] = op.origin
// If storage wiping exists, the hash of the storage slot key must be used
if len(op.storagesOrigin) > 0 {
storagesOrigin[addr] = op.storagesOrigin
}
@ -105,13 +130,17 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
if len(op.storages) > 0 {
storages[addrHash] = op.storages
}
if len(op.storagesOrigin) > 0 {
storageOriginSet := op.storagesOriginByHash
if rawStorageKey {
storageOriginSet = op.storagesOriginByKey
}
if len(storageOriginSet) > 0 {
origin := storagesOrigin[addr]
if origin == nil {
storagesOrigin[addr] = op.storagesOrigin
storagesOrigin[addr] = storageOriginSet
continue
}
for key, slot := range op.storagesOrigin {
for key, slot := range storageOriginSet {
if _, found := origin[key]; !found {
origin[key] = slot
}
@ -127,6 +156,7 @@ func newStateUpdate(originRoot common.Hash, root common.Hash, deletes map[common
accountsOrigin: accountsOrigin,
storages: storages,
storagesOrigin: storagesOrigin,
rawStorageKey: rawStorageKey,
codes: codes,
nodes: nodes,
}

@ -80,7 +80,7 @@ func makeTestState(scheme string) (ethdb.Database, Database, *triedb.Database, c
}
accounts = append(accounts, acc)
}
root, _ := state.Commit(0, false)
root, _ := state.Commit(0, false, false)
// Return the generated state
return db, sdb, nodeDb, root, accounts

@ -559,7 +559,7 @@ func TestOpenDrops(t *testing.T) {
statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), uint256.NewInt(10000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(duplicater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(crypto.PubkeyToAddress(repeater.PublicKey), uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true)
statedb.Commit(0, true, false)
chain := &testBlockChain{
config: testChainConfig,
@ -678,7 +678,7 @@ func TestOpenIndex(t *testing.T) {
// Create a blob pool out of the pre-seeded data
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true)
statedb.Commit(0, true, false)
chain := &testBlockChain{
config: testChainConfig,
@ -780,7 +780,7 @@ func TestOpenHeap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true)
statedb.Commit(0, true, false)
chain := &testBlockChain{
config: testChainConfig,
@ -860,7 +860,7 @@ func TestOpenCap(t *testing.T) {
statedb.AddBalance(addr1, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr2, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.AddBalance(addr3, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
statedb.Commit(0, true)
statedb.Commit(0, true, false)
chain := &testBlockChain{
config: testChainConfig,
@ -1283,7 +1283,7 @@ func TestAdd(t *testing.T) {
store.Put(blob)
}
}
statedb.Commit(0, true)
statedb.Commit(0, true, false)
store.Close()
// Create a blob pool out of the pre-seeded data
@ -1356,7 +1356,7 @@ func benchmarkPoolPending(b *testing.B, datacap uint64) {
statedb.AddBalance(addr, uint256.NewInt(1_000_000_000), tracing.BalanceChangeUnspecified)
pool.add(tx)
}
statedb.Commit(0, true)
statedb.Commit(0, true, false)
defer pool.Close()
// Benchmark assembling the pending

@ -81,7 +81,7 @@ func TestAccountRange(t *testing.T) {
m[addr] = true
}
}
root, _ := sdb.Commit(0, true)
root, _ := sdb.Commit(0, true, false)
sdb, _ = state.New(root, statedb, nil)
trie, err := statedb.OpenTrie(root)
@ -139,7 +139,7 @@ func TestEmptyAccountRange(t *testing.T) {
st, _ = state.New(types.EmptyRootHash, statedb, nil)
)
// Commit(although nothing to flush) and re-init the statedb
st.Commit(0, true)
st.Commit(0, true, false)
st, _ = state.New(types.EmptyRootHash, statedb, nil)
results := st.RawDump(&state.DumpConfig{
@ -180,7 +180,7 @@ func TestStorageRangeAt(t *testing.T) {
for _, entry := range storage {
sdb.SetState(addr, *entry.Key, entry.Value)
}
root, _ := sdb.Commit(0, false)
root, _ := sdb.Commit(0, false, false)
sdb, _ = state.New(root, db, nil)
// Check a few combinations of limit and start/end.

@ -151,7 +151,7 @@ func (eth *Ethereum) hashState(ctx context.Context, block *types.Block, reexec u
return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err)
}
// Finalize the state so any modifications are written to the trie
root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()))
root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), eth.blockchain.Config().IsCancun(current.Number(), current.Time()))
if err != nil {
return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w",
current.NumberU64(), current.Root().Hex(), err)

@ -319,7 +319,7 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh
st.StateDB.AddBalance(block.Coinbase(), new(uint256.Int), tracing.BalanceChangeUnspecified)
// Commit state mutations into database.
root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()))
root, _ = st.StateDB.Commit(block.NumberU64(), config.IsEIP158(block.Number()), config.IsCancun(block.Number(), block.Time()))
return st, root, err
}
@ -471,7 +471,7 @@ func MakePreState(db ethdb.Database, accounts types.GenesisAlloc, snapshotter bo
}
}
// Commit and re-open to start with a clean state.
root, _ := statedb.Commit(0, false)
root, _ := statedb.Commit(0, false, false)
// If snapshot is requested, initialize the snapshotter and use it in state.
var snaps *snapshot.Tree

@ -22,16 +22,18 @@ import "github.com/ethereum/go-ethereum/common"
// The value refers to the original content of state before the transition
// is made. Nil means that the state was not present previously.
type Set struct {
Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
size common.StorageSize // Approximate size of set
Accounts map[common.Address][]byte // Mutated account set, nil means the account was not present
Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
RawStorageKey bool // Flag whether the storage set uses the raw slot key or the hash
size common.StorageSize // Approximate size of set
}
// New constructs the state set with provided data.
func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *Set {
func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *Set {
return &Set{
Accounts: accounts,
Storages: storages,
Accounts: accounts,
Storages: storages,
RawStorageKey: rawStorageKey,
}
}
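
A minimal usage sketch of the extended constructor, assuming the common and trie/triestate packages (buildExampleSet and the addresses are placeholders):

// Sketch only: building a Set whose storage map is keyed by raw slot keys,
// hence RawStorageKey is set to true. Nil values mean "not present before".
func buildExampleSet() *triestate.Set {
	addr := common.HexToAddress("0x01")
	accounts := map[common.Address][]byte{
		addr: nil, // the account did not exist before the transition
	}
	storages := map[common.Address]map[common.Hash][]byte{
		addr: {common.HexToHash("0x02"): nil}, // raw slot key -> prior value
	}
	return triestate.New(accounts, storages, true)
}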

@ -92,29 +92,47 @@ func newCtx(stateRoot common.Hash) *genctx {
}
}
func (ctx *genctx) storageOriginSet(rawStorageKey bool, t *tester) map[common.Address]map[common.Hash][]byte {
if !rawStorageKey {
return ctx.storageOrigin
}
set := make(map[common.Address]map[common.Hash][]byte)
for addr, storage := range ctx.storageOrigin {
subset := make(map[common.Hash][]byte)
for hash, val := range storage {
key := t.hashPreimage(hash)
subset[key] = val
}
set[addr] = subset
}
return set
}
type tester struct {
db *Database
roots []common.Hash
preimages map[common.Hash]common.Address
accounts map[common.Hash][]byte
storages map[common.Hash]map[common.Hash][]byte
preimages map[common.Hash][]byte
// current state set
accounts map[common.Hash][]byte
storages map[common.Hash]map[common.Hash][]byte
// state snapshots
snapAccounts map[common.Hash]map[common.Hash][]byte
snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte
}
func newTester(t *testing.T, historyLimit uint64) *tester {
func newTester(t *testing.T, historyLimit uint64, isVerkle bool) *tester {
var (
disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false)
db = New(disk, &Config{
StateHistory: historyLimit,
CleanCacheSize: 16 * 1024,
DirtyCacheSize: 16 * 1024,
}, false)
}, isVerkle)
obj = &tester{
db: db,
preimages: make(map[common.Hash]common.Address),
preimages: make(map[common.Hash][]byte),
accounts: make(map[common.Hash][]byte),
storages: make(map[common.Hash]map[common.Hash][]byte),
snapAccounts: make(map[common.Hash]map[common.Hash][]byte),
@ -126,7 +144,8 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
if len(obj.roots) != 0 {
parent = obj.roots[len(obj.roots)-1]
}
root, nodes, states := obj.generate(parent)
root, nodes, states := obj.generate(parent, i > 6)
if err := db.Update(root, parent, uint64(i), nodes, states); err != nil {
panic(fmt.Errorf("failed to update state changes, err: %w", err))
}
@ -135,6 +154,14 @@ func newTester(t *testing.T, historyLimit uint64) *tester {
return obj
}
func (t *tester) accountPreimage(hash common.Hash) common.Address {
return common.BytesToAddress(t.preimages[hash])
}
func (t *tester) hashPreimage(hash common.Hash) common.Hash {
return common.BytesToHash(t.preimages[hash])
}
func (t *tester) release() {
t.db.Close()
t.db.diskdb.Close()
@ -142,7 +169,7 @@ func (t *tester) release() {
func (t *tester) randAccount() (common.Address, []byte) {
for addrHash, account := range t.accounts {
return t.preimages[addrHash], account
return t.accountPreimage(addrHash), account
}
return common.Address{}, nil
}
@ -155,7 +182,9 @@ func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash {
)
for i := 0; i < 10; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
hash := testrand.Hash()
key := testrand.Bytes(32)
hash := crypto.Keccak256Hash(key)
t.preimages[hash] = key
storage[hash] = v
origin[hash] = nil
@ -184,7 +213,9 @@ func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Has
}
for i := 0; i < 3; i++ {
v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testrand.Bytes(32)))
hash := testrand.Hash()
key := testrand.Bytes(32)
hash := crypto.Keccak256Hash(key)
t.preimages[hash] = key
storage[hash] = v
origin[hash] = nil
@ -217,7 +248,7 @@ func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash
return root
}
func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
func (t *tester) generate(parent common.Hash, rawStorageKey bool) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) {
var (
ctx = newCtx(parent)
dirties = make(map[common.Hash]struct{})
@ -228,9 +259,12 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
// account creation
addr := testrand.Address()
addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account was already existent
if _, ok := t.accounts[addrHash]; ok {
continue
}
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@ -239,7 +273,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
root := t.generateStorage(ctx, addr)
ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root))
ctx.accountOrigin[addr] = nil
t.preimages[addrHash] = addr
t.preimages[addrHash] = addr.Bytes()
case modifyAccountOp:
// account mutation
@ -248,6 +282,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue
}
addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@ -267,6 +303,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
continue
}
addrHash := crypto.Keccak256Hash(addr.Bytes())
// short circuit if the account has been modified within the same transition
if _, ok := dirties[addrHash]; ok {
continue
}
@ -310,7 +348,8 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
delete(t.storages, addrHash)
}
}
return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
storageOrigin := ctx.storageOriginSet(rawStorageKey, t)
return root, ctx.nodes, triestate.New(ctx.accountOrigin, storageOrigin, rawStorageKey)
}
// lastHash returns the latest root hash, or empty if nothing is cached.
@ -405,7 +444,7 @@ func TestDatabaseRollback(t *testing.T) {
}()
// Verify state histories
tester := newTester(t, 0)
tester := newTester(t, 0, false)
defer tester.release()
if err := tester.verifyHistory(); err != nil {
@ -439,7 +478,7 @@ func TestDatabaseRecoverable(t *testing.T) {
}()
var (
tester = newTester(t, 0)
tester = newTester(t, 0, false)
index = tester.bottomIndex()
)
defer tester.release()
@ -483,7 +522,7 @@ func TestDisable(t *testing.T) {
maxDiffLayers = 128
}()
tester := newTester(t, 0)
tester := newTester(t, 0, false)
defer tester.release()
stored := crypto.Keccak256Hash(rawdb.ReadAccountTrieNode(tester.db.diskdb, nil))
@ -525,7 +564,7 @@ func TestCommit(t *testing.T) {
maxDiffLayers = 128
}()
tester := newTester(t, 0)
tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Commit(tester.lastHash(), false); err != nil {
@ -555,7 +594,7 @@ func TestJournal(t *testing.T) {
maxDiffLayers = 128
}()
tester := newTester(t, 0)
tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -585,7 +624,7 @@ func TestCorruptedJournal(t *testing.T) {
maxDiffLayers = 128
}()
tester := newTester(t, 0)
tester := newTester(t, 0, false)
defer tester.release()
if err := tester.db.Journal(tester.lastHash()); err != nil {
@ -633,7 +672,7 @@ func TestTailTruncateHistory(t *testing.T) {
maxDiffLayers = 128
}()
tester := newTester(t, 10)
tester := newTester(t, 10, false)
defer tester.release()
tester.db.Close()

@ -229,7 +229,7 @@ func (dl *diskLayer) revert(h *history) (*diskLayer, error) {
// Apply the reverse state changes upon the current state. This must
// be done before holding the lock in order to access state in "this"
// layer.
nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.accounts, h.storages)
nodes, err := apply(dl.db, h.meta.parent, h.meta.root, h.meta.version != stateHistoryV0, h.accounts, h.storages)
if err != nil {
return nil, err
}

@ -30,11 +30,12 @@ import (
// context wraps all fields for executing state diffs.
type context struct {
prevRoot common.Hash
postRoot common.Hash
accounts map[common.Address][]byte
storages map[common.Address]map[common.Hash][]byte
nodes *trienode.MergedNodeSet
prevRoot common.Hash
postRoot common.Hash
accounts map[common.Address][]byte
storages map[common.Address]map[common.Hash][]byte
nodes *trienode.MergedNodeSet
rawStorageKey bool
// TODO (rjl493456442) abstract out the state hasher
// for supporting verkle tree.
@ -43,18 +44,19 @@ type context struct {
// apply processes the given state diffs, updates the corresponding post-state
// and returns the trie nodes that have been modified.
func apply(db database.Database, prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
func apply(db database.Database, prevRoot common.Hash, postRoot common.Hash, rawStorageKey bool, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) (map[common.Hash]map[string]*trienode.Node, error) {
tr, err := trie.New(trie.TrieID(postRoot), db)
if err != nil {
return nil, err
}
ctx := &context{
prevRoot: prevRoot,
postRoot: postRoot,
accounts: accounts,
storages: storages,
accountTrie: tr,
nodes: trienode.NewMergedNodeSet(),
prevRoot: prevRoot,
postRoot: postRoot,
accounts: accounts,
storages: storages,
accountTrie: tr,
rawStorageKey: rawStorageKey,
nodes: trienode.NewMergedNodeSet(),
}
for addr, account := range accounts {
var err error
@ -109,11 +111,15 @@ func updateAccount(ctx *context, db database.Database, addr common.Address) erro
return err
}
for key, val := range ctx.storages[addr] {
tkey := key
if ctx.rawStorageKey {
tkey = h.hash(key.Bytes())
}
var err error
if len(val) == 0 {
err = st.Delete(key.Bytes())
err = st.Delete(tkey.Bytes())
} else {
err = st.Update(key.Bytes(), val)
err = st.Update(tkey.Bytes(), val)
}
if err != nil {
return err
@ -166,7 +172,11 @@ func deleteAccount(ctx *context, db database.Database, addr common.Address) erro
if len(val) != 0 {
return errors.New("expect storage deletion")
}
if err := st.Delete(key.Bytes()); err != nil {
tkey := key
if ctx.rawStorageKey {
tkey = h.hash(key.Bytes())
}
if err := st.Delete(tkey.Bytes()); err != nil {
return err
}
}
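
updateAccount and deleteAccount above apply the same rule when deriving the trie key; a hedged sketch of that rule as a standalone helper (trieKey is a hypothetical name, and Keccak-256 is assumed as the hasher in merkle mode), assuming the common and crypto packages:

// Sketch only: the key handed to the hash-addressed storage trie. v1 history
// entries carry the raw slot key and must be hashed first; v0 entries already
// carry the hash of the key.
func trieKey(rawStorageKey bool, id common.Hash) common.Hash {
	if rawStorageKey {
		return crypto.Keccak256Hash(id.Bytes())
	}
	return id
}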

@ -69,7 +69,8 @@ const (
slotIndexSize = common.HashLength + 5 // The length of encoded slot index
historyMetaSize = 9 + 2*common.HashLength // The length of encoded history meta
stateHistoryVersion = uint8(0) // initial version of state history structure.
stateHistoryV0 = uint8(0) // initial version of state history structure
stateHistoryV1 = uint8(1) // use the storage slot raw key as the identifier instead of the key hash
)
// Each state history entry consists of five elements:
@ -170,15 +171,18 @@ func (i *accountIndex) decode(blob []byte) {
// slotIndex describes the metadata belonging to a storage slot.
type slotIndex struct {
hash common.Hash // The hash of slot key
length uint8 // The length of storage slot, up to 32 bytes defined in protocol
offset uint32 // The offset of item in storage slot data table
// the identifier of the storage slot. Specifically
// in v0, it's the hash of the raw storage slot key (32 bytes);
// in v1, it's the raw storage slot key (32 bytes);
id common.Hash
length uint8 // The length of storage slot, up to 32 bytes defined in protocol
offset uint32 // The offset of item in storage slot data table
}
// encode packs slot index into byte stream.
func (i *slotIndex) encode() []byte {
var buf [slotIndexSize]byte
copy(buf[:common.HashLength], i.hash.Bytes())
copy(buf[:common.HashLength], i.id.Bytes())
buf[common.HashLength] = i.length
binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
return buf[:]
@ -186,7 +190,7 @@ func (i *slotIndex) encode() []byte {
// decode unpacks the slot index from the byte stream.
func (i *slotIndex) decode(blob []byte) {
i.hash = common.BytesToHash(blob[:common.HashLength])
i.id = common.BytesToHash(blob[:common.HashLength])
i.length = blob[common.HashLength]
i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
}
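
The 37-byte layout (slotIndexSize = common.HashLength + 5) is identical in both versions; only the meaning of the 32-byte identifier changes. A hedged sketch of a version-aware reader (slotKeyHash is a hypothetical helper, assuming the common and crypto packages):

// Sketch only: the on-disk layout of one slot index entry.
//
//   bytes [0, 32)  identifier: keccak(slot key) in v0, the raw slot key in v1
//   byte  32       length of the slot value (up to 32 bytes)
//   bytes [33, 37) offset into the slot data table
//
// A reader that needs the hashed key can normalise either version:
func slotKeyHash(version uint8, id common.Hash) common.Hash {
	if version == stateHistoryV0 {
		return id // v0 already stores the hash of the slot key
	}
	return crypto.Keccak256Hash(id.Bytes()) // v1 stores the raw key
}
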
@ -215,7 +219,7 @@ func (m *meta) decode(blob []byte) error {
return errors.New("no version tag")
}
switch blob[0] {
case stateHistoryVersion:
case stateHistoryV0, stateHistoryV1:
if len(blob) != historyMetaSize {
return fmt.Errorf("invalid state history meta, len: %d", len(blob))
}
@ -255,9 +259,13 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, states *trie
slices.SortFunc(slist, common.Hash.Cmp)
storageList[addr] = slist
}
version := stateHistoryV0
if states.RawStorageKey {
version = stateHistoryV1
}
return &history{
meta: &meta{
version: stateHistoryVersion,
version: version,
parent: parent,
root: root,
block: block,
@ -290,7 +298,7 @@ func (h *history) encode() ([]byte, []byte, []byte, []byte) {
// Encode storage slots in order
for _, slotHash := range h.storageList[addr] {
sIndex := slotIndex{
hash: slotHash,
id: slotHash,
length: uint8(len(slots[slotHash])),
offset: uint32(len(storageData)),
}
@ -378,7 +386,7 @@ func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
// readStorage parses the storage slots from the byte stream with specified account.
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
var (
last common.Hash
last *common.Hash
count = int(accIndex.storageSlots)
list = make([]common.Hash, 0, count)
storage = make(map[common.Hash][]byte, count)
@ -403,8 +411,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
}
index.decode(r.storageIndexes[start:end])
if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 {
return nil, nil, errors.New("storage slot is not in order")
if last != nil {
if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
}
}
if index.offset != r.lastSlotDataRead {
return nil, nil, errors.New("storage data buffer is gapped")
@ -413,10 +423,10 @@ func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.
if uint32(len(r.storageData)) < sEnd {
return nil, nil, errors.New("storage data buffer is corrupted")
}
storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd]
list = append(list, index.hash)
storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
list = append(list, index.id)
last = index.hash
last = &index.id
r.lastSlotIndexRead = end
r.lastSlotDataRead = sEnd
}

@ -21,6 +21,7 @@ import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)
@ -109,12 +110,17 @@ func accountHistory(freezer ethdb.AncientReader, address common.Address, start,
// storageHistory inspects the storage history within the range.
func storageHistory(freezer ethdb.AncientReader, address common.Address, slot common.Hash, start uint64, end uint64) (*HistoryStats, error) {
slotHash := crypto.Keccak256Hash(slot.Bytes())
return inspectHistory(freezer, start, end, func(h *history, stats *HistoryStats) {
slots, exists := h.storages[address]
if !exists {
return
}
blob, exists := slots[slot]
key := slotHash
if h.meta.version != stateHistoryV0 {
key = slot
}
blob, exists := slots[key]
if !exists {
return
}

@ -47,7 +47,7 @@ func randomStateSet(n int) *triestate.Set {
account := generateAccount(types.EmptyRootHash)
accounts[addr] = types.SlimAccountRLP(account)
}
return triestate.New(accounts, storages)
return triestate.New(accounts, storages, false)
}
func makeHistory() *history {

@ -47,7 +47,8 @@ var (
//
// - Version 0: initial version
// - Version 1: storage.Incomplete field is removed
const journalVersion uint64 = 1
// - Version 2: a flag has been added to indicate whether the storage slot key is the raw key or a hash
const journalVersion uint64 = 2
// journalNode represents a trie node persisted in the journal.
type journalNode struct {
@ -196,6 +197,10 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
if err := r.Decode(&block); err != nil {
return nil, fmt.Errorf("load block number: %v", err)
}
var rawStorageKey bool
if err := r.Decode(&rawStorageKey); err != nil {
return nil, fmt.Errorf("load raw storage key flag: %v", err)
}
// Read in-memory trie nodes from journal
var encoded []journalNodes
if err := r.Decode(&encoded); err != nil {
@ -240,7 +245,7 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
}
storages[entry.Account] = set
}
return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages)), r)
return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, rawStorageKey)), r)
}
// journal implements the layer interface, marshaling the un-flushed trie nodes
@ -294,6 +299,9 @@ func (dl *diffLayer) journal(w io.Writer) error {
if err := rlp.Encode(w, dl.block); err != nil {
return err
}
if err := rlp.Encode(w, dl.states.RawStorageKey); err != nil {
return err
}
// Write the accumulated trie nodes into buffer
nodes := make([]journalNodes, 0, len(dl.nodes))
for owner, subset := range dl.nodes {
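
The flag is journalled between the block number and the trie nodes, and the loader must consume it at the same position; a self-contained round trip of just those two fields (the surrounding journal fields are omitted):

// Sketch only: the encode/decode order around the new flag, mirroring
// diffLayer.journal and loadDiffLayer above.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	var buf bytes.Buffer
	// encode side: block number, then the RawStorageKey flag (new in journal version 2)
	rlp.Encode(&buf, uint64(42))
	rlp.Encode(&buf, true)
	// decode side: the fields must be read back in the same order
	var (
		s             = rlp.NewStream(&buf, 0)
		block         uint64
		rawStorageKey bool
	)
	s.Decode(&block)
	s.Decode(&rawStorageKey)
	fmt.Println(block, rawStorageKey) // 42 true
}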
