diff --git a/core/state/metrics.go b/core/state/metrics.go
index 64c651461e..7447e44dfd 100644
--- a/core/state/metrics.go
+++ b/core/state/metrics.go
@@ -33,5 +33,4 @@ var (
 	slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil)
 	slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil)
 	slotDeletionSize  = metrics.NewRegisteredMeter("state/delete/storage/size", nil)
-	slotDeletionSkip  = metrics.NewRegisteredGauge("state/delete/storage/skip", nil)
 )
diff --git a/core/state/statedb.go b/core/state/statedb.go
index a4b8cf93e2..4d1163d3c6 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -36,12 +36,6 @@ import (
 	"github.com/holiman/uint256"
 )
 
-const (
-	// storageDeleteLimit denotes the highest permissible memory allocation
-	// employed for contract storage deletion.
-	storageDeleteLimit = 512 * 1024 * 1024
-)
-
 type revision struct {
 	id           int
 	journalIndex int
 }
@@ -949,10 +943,10 @@ func (s *StateDB) clearJournalAndRefund() {
 // of a specific account. It leverages the associated state snapshot for fast
 // storage iteration and constructs trie node deletion markers by creating
 // stack trie with iterated slots.
-func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
 	iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
 	if err != nil {
-		return false, 0, nil, nil, err
+		return 0, nil, nil, err
 	}
 	defer iter.Release()
 
@@ -968,40 +962,37 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
 	})
 	stack := trie.NewStackTrie(options)
 	for iter.Next() {
-		if size > storageDeleteLimit {
-			return true, size, nil, nil, nil
-		}
 		slot := common.CopyBytes(iter.Slot())
 		if err := iter.Error(); err != nil { // error might occur after Slot function
-			return false, 0, nil, nil, err
+			return 0, nil, nil, err
 		}
 		size += common.StorageSize(common.HashLength + len(slot))
 		slots[iter.Hash()] = slot
 
 		if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
-			return false, 0, nil, nil, err
+			return 0, nil, nil, err
 		}
 	}
 	if err := iter.Error(); err != nil { // error might occur during iteration
-		return false, 0, nil, nil, err
+		return 0, nil, nil, err
 	}
 	if stack.Hash() != root {
-		return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
+		return 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
 	}
-	return false, size, slots, nodes, nil
+	return size, slots, nodes, nil
 }
 
 // slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
 // employed when the associated state snapshot is not available. It iterates the
 // storage slots along with all internal trie nodes via trie directly.
-func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
 	tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie)
 	if err != nil {
-		return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
+		return 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
 	}
 	it, err := tr.NodeIterator(nil)
 	if err != nil {
-		return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
+		return 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
 	}
 	var (
 		size  common.StorageSize
@@ -1009,9 +1000,6 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
 		slots = make(map[common.Hash][]byte)
 	)
 	for it.Next(true) {
-		if size > storageDeleteLimit {
-			return true, size, nil, nil, nil
-		}
 		if it.Leaf() {
 			slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
 			size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
@@ -1024,9 +1012,9 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
 		nodes.AddNode(it.Path(), trienode.NewDeleted())
 	}
 	if err := it.Error(); err != nil {
-		return false, 0, nil, nil, err
+		return 0, nil, nil, err
 	}
-	return false, size, slots, nodes, nil
+	return size, slots, nodes, nil
 }
 
 // deleteStorage is designed to delete the storage trie of a designated account.
@@ -1034,31 +1022,27 @@ func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, r
 // potentially leading to an out-of-memory panic. The function will make an attempt
 // to utilize an efficient strategy if the associated state snapshot is reachable;
 // otherwise, it will resort to a less-efficient approach.
-func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
+func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (map[common.Hash][]byte, *trienode.NodeSet, error) {
 	var (
-		start   = time.Now()
-		err     error
-		aborted bool
-		size    common.StorageSize
-		slots   map[common.Hash][]byte
-		nodes   *trienode.NodeSet
+		start = time.Now()
+		err   error
+		size  common.StorageSize
+		slots map[common.Hash][]byte
+		nodes *trienode.NodeSet
 	)
 	// The fast approach can be failed if the snapshot is not fully
 	// generated, or it's internally corrupted. Fallback to the slow
 	// one just in case.
 	if s.snap != nil {
-		aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
+		size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
 	}
 	if s.snap == nil || err != nil {
-		aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
+		size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
 	}
 	if err != nil {
-		return false, nil, nil, err
+		return nil, nil, err
 	}
 	if metrics.EnabledExpensive {
-		if aborted {
-			slotDeletionSkip.Inc(1)
-		}
 		n := int64(len(slots))
 
 		slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
@@ -1068,7 +1052,7 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 		slotDeletionCount.Mark(n)
 		slotDeletionSize.Mark(int64(size))
 	}
-	return aborted, slots, nodes, nil
+	return slots, nodes, nil
 }
 
 // handleDestruction processes all destruction markers and deletes the account
@@ -1095,13 +1079,12 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 //
 // In case (d), **original** account along with its storages should be deleted,
 // with their values be tracked as original value.
-func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) {
+func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) error {
 	// Short circuit if geth is running with hash mode. This procedure can consume
 	// considerable time and storage deletion isn't supported in hash mode, thus
 	// preemptively avoiding unnecessary expenses.
-	incomplete := make(map[common.Address]struct{})
 	if s.db.TrieDB().Scheme() == rawdb.HashScheme {
-		return incomplete, nil
+		return nil
 	}
 	for addr, prev := range s.stateObjectsDestruct {
 		// The original account was non-existing, and it's marked as destructed
@@ -1124,18 +1107,9 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
 			continue
 		}
 		// Remove storage slots belong to the account.
-		aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
+		slots, set, err := s.deleteStorage(addr, addrHash, prev.Root)
 		if err != nil {
-			return nil, fmt.Errorf("failed to delete storage, err: %w", err)
-		}
-		// The storage is too huge to handle, skip it but mark as incomplete.
-		// For case (d), the account is resurrected might with a few slots
-		// created. In this case, wipe the entire storage state diff because
-		// of aborted deletion.
-		if aborted {
-			incomplete[addr] = struct{}{}
-			delete(s.storagesOrigin, addr)
-			continue
+			return fmt.Errorf("failed to delete storage, err: %w", err)
 		}
 		if s.storagesOrigin[addr] == nil {
 			s.storagesOrigin[addr] = slots
@@ -1147,10 +1121,10 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A
 			}
 		}
 		if err := nodes.Merge(set); err != nil {
-			return nil, err
+			return err
 		}
 	}
-	return incomplete, nil
+	return nil
 }
 
 // Commit writes the state to the underlying in-memory trie database.
@@ -1179,8 +1153,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 		codeWriter = s.db.DiskDB().NewBatch()
 	)
 	// Handle all state deletions first
-	incomplete, err := s.handleDestruction(nodes)
-	if err != nil {
+	if err := s.handleDestruction(nodes); err != nil {
 		return common.Hash{}, err
 	}
 	// Handle all state updates afterwards
@@ -1276,7 +1249,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 	}
 	if root != origin {
 		start := time.Now()
-		set := triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)
+		set := triestate.New(s.accountsOrigin, s.storagesOrigin)
 		if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
 			return common.Hash{}, err
 		}
diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go
index cd86a7f4b6..3649b0ac58 100644
--- a/core/state/statedb_test.go
+++ b/core/state/statedb_test.go
@@ -1161,12 +1161,12 @@ func TestDeleteStorage(t *testing.T) {
 	obj := fastState.getOrNewStateObject(addr)
 	storageRoot := obj.data.Root
 
-	_, _, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
+	_, fastNodes, err := fastState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	_, _, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
+	_, slowNodes, err := slowState.deleteStorage(addr, crypto.Keccak256Hash(addr[:]), storageRoot)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/trie/triestate/state.go b/trie/triestate/state.go
index 4c47e9c397..aa4d32f852 100644
--- a/trie/triestate/state.go
+++ b/trie/triestate/state.go
@@ -59,18 +59,16 @@ type TrieLoader interface {
 // The value refers to the original content of state before the transition
 // is made. Nil means that the state was not present previously.
 type Set struct {
-	Accounts   map[common.Address][]byte                 // Mutated account set, nil means the account was not present
-	Storages   map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
-	Incomplete map[common.Address]struct{}               // Indicator whether the storage is incomplete due to large deletion
-	size       common.StorageSize                        // Approximate size of set
+	Accounts map[common.Address][]byte                 // Mutated account set, nil means the account was not present
+	Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
+	size     common.StorageSize                        // Approximate size of set
 }
 
 // New constructs the state set with provided data.
-func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
+func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *Set {
 	return &Set{
-		Accounts:   accounts,
-		Storages:   storages,
-		Incomplete: incomplete,
+		Accounts: accounts,
+		Storages: storages,
 	}
 }
 
@@ -88,7 +86,6 @@ func (s *Set) Size() common.StorageSize {
 		}
 		s.size += common.StorageSize(common.AddressLength)
 	}
-	s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
 	return s.size
 }
 
diff --git a/triedb/pathdb/database.go b/triedb/pathdb/database.go
index 3e8e83a00c..b1e01abac4 100644
--- a/triedb/pathdb/database.go
+++ b/triedb/pathdb/database.go
@@ -403,9 +403,6 @@ func (db *Database) Recoverable(root common.Hash) bool {
 		if m.parent != root {
 			return errors.New("unexpected state history")
 		}
-		if len(m.incomplete) > 0 {
-			return errors.New("incomplete state history")
-		}
 		root = m.root
 		return nil
 	}) == nil
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index df69942e9a..2e7e1bef05 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -299,7 +299,7 @@ func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNode
 			}
 		}
 	}
-	return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil)
+	return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin)
 }
 
 // lastRoot returns the latest root hash, or empty if nothing is cached.
@@ -543,7 +543,7 @@ func TestCorruptedJournal(t *testing.T) {
 
 	// Mutate the journal in disk, it should be regarded as invalid
 	blob := rawdb.ReadTrieJournal(tester.db.diskdb)
-	blob[0] = 1
+	blob[0] = 0xa
 	rawdb.WriteTrieJournal(tester.db.diskdb, blob)
 
 	// Verify states, all not-yet-written states should be discarded
diff --git a/triedb/pathdb/disklayer.go b/triedb/pathdb/disklayer.go
index ef697cbce8..777e4ec8a7 100644
--- a/triedb/pathdb/disklayer.go
+++ b/triedb/pathdb/disklayer.go
@@ -17,7 +17,6 @@
 package pathdb
 
 import (
-	"errors"
 	"fmt"
 	"sync"
 
@@ -239,12 +238,6 @@ func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer
 	if h.meta.root != dl.rootHash() {
 		return nil, errUnexpectedHistory
 	}
-	// Reject if the provided state history is incomplete. It's due to
-	// a large construct SELF-DESTRUCT which can't be handled because
-	// of memory limitation.
-	if len(h.meta.incomplete) > 0 {
-		return nil, errors.New("incomplete state history")
-	}
 	if dl.id == 0 {
 		return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable)
 	}
diff --git a/triedb/pathdb/history.go b/triedb/pathdb/history.go
index 051e122bec..68fb4809f0 100644
--- a/triedb/pathdb/history.go
+++ b/triedb/pathdb/history.go
@@ -66,7 +66,7 @@ import (
 const (
 	accountIndexSize = common.AddressLength + 13 // The length of encoded account index
 	slotIndexSize    = common.HashLength + 5     // The length of encoded slot index
-	historyMetaSize  = 9 + 2*common.HashLength   // The length of fixed size part of meta object
+	historyMetaSize  = 9 + 2*common.HashLength   // The length of encoded history meta
 
 	stateHistoryVersion = uint8(0) // initial version of state history structure.
 )
@@ -192,23 +192,19 @@ func (i *slotIndex) decode(blob []byte) {
 
 // meta describes the meta data of state history object.
 type meta struct {
-	version    uint8            // version tag of history object
-	parent     common.Hash      // prev-state root before the state transition
-	root       common.Hash      // post-state root after the state transition
-	block      uint64           // associated block number
-	incomplete []common.Address // list of address whose storage set is incomplete
+	version uint8       // version tag of history object
+	parent  common.Hash // prev-state root before the state transition
+	root    common.Hash // post-state root after the state transition
+	block   uint64      // associated block number
 }
 
 // encode packs the meta object into byte stream.
 func (m *meta) encode() []byte {
-	buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength)
+	buf := make([]byte, historyMetaSize)
 	buf[0] = m.version
 	copy(buf[1:1+common.HashLength], m.parent.Bytes())
 	copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
 	binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
-	for i, h := range m.incomplete {
-		copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes())
-	}
 	return buf[:]
 }
 
@@ -219,20 +215,13 @@ func (m *meta) decode(blob []byte) error {
 	}
 	switch blob[0] {
 	case stateHistoryVersion:
-		if len(blob) < historyMetaSize {
+		if len(blob) != historyMetaSize {
 			return fmt.Errorf("invalid state history meta, len: %d", len(blob))
 		}
-		if (len(blob)-historyMetaSize)%common.AddressLength != 0 {
-			return fmt.Errorf("corrupted state history meta, len: %d", len(blob))
-		}
 		m.version = blob[0]
 		m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
 		m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
 		m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
-		for pos := historyMetaSize; pos < len(blob); {
-			m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength]))
-			pos += common.AddressLength
-		}
 		return nil
 	default:
 		return fmt.Errorf("unknown version %d", blob[0])
 	}
 }
@@ -257,7 +246,6 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, states *trie
 	var (
 		accountList []common.Address
 		storageList = make(map[common.Address][]common.Hash)
-		incomplete  []common.Address
 	)
 	for addr := range states.Accounts {
 		accountList = append(accountList, addr)
@@ -272,18 +260,12 @@ func newHistory(root common.Hash, parent common.Hash, block uint64, states *trie
 		slices.SortFunc(slist, common.Hash.Cmp)
 		storageList[addr] = slist
 	}
-	for addr := range states.Incomplete {
-		incomplete = append(incomplete, addr)
-	}
-	slices.SortFunc(incomplete, common.Address.Cmp)
-
 	return &history{
 		meta: &meta{
-			version:    stateHistoryVersion,
-			parent:     parent,
-			root:       root,
-			block:      block,
-			incomplete: incomplete,
+			version: stateHistoryVersion,
+			parent:  parent,
+			root:    root,
+			block:   block,
 		},
 		accounts:    states.Accounts,
 		accountList: accountList,
diff --git a/triedb/pathdb/history_test.go b/triedb/pathdb/history_test.go
index a3257441de..ab0d44777d 100644
--- a/triedb/pathdb/history_test.go
+++ b/triedb/pathdb/history_test.go
@@ -47,7 +47,7 @@ func randomStateSet(n int) *triestate.Set {
 		account := generateAccount(types.EmptyRootHash)
 		accounts[addr] = types.SlimAccountRLP(account)
 	}
-	return triestate.New(accounts, storages, nil)
+	return triestate.New(accounts, storages)
 }
 
 func makeHistory() *history {
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index ac770763e3..3a0b7ebae2 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -41,7 +41,13 @@ var (
 	errUnmatchedJournal = errors.New("unmatched journal")
 )
 
-const journalVersion uint64 = 0
+// journalVersion ensures that an incompatible journal is detected and discarded.
+//
+// Changelog:
+//
+// - Version 0: initial version
+// - Version 1: storage.Incomplete field is removed
+const journalVersion uint64 = 1
 
 // journalNode represents a trie node persisted in the journal.
 type journalNode struct {
@@ -64,10 +70,9 @@ type journalAccounts struct {
 
 // journalStorage represents a list of storage slots belong to an account.
 type journalStorage struct {
-	Incomplete bool
-	Account    common.Address
-	Hashes     []common.Hash
-	Slots      [][]byte
+	Account common.Address
+	Hashes  []common.Hash
+	Slots   [][]byte
 }
 
 // loadJournal tries to parse the layer journal from the disk.
@@ -209,11 +214,10 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
 	}
 	// Read state changes from journal
 	var (
-		jaccounts  journalAccounts
-		jstorages  []journalStorage
-		accounts   = make(map[common.Address][]byte)
-		storages   = make(map[common.Address]map[common.Hash][]byte)
-		incomplete = make(map[common.Address]struct{})
+		jaccounts journalAccounts
+		jstorages []journalStorage
+		accounts  = make(map[common.Address][]byte)
+		storages  = make(map[common.Address]map[common.Hash][]byte)
 	)
 	if err := r.Decode(&jaccounts); err != nil {
 		return nil, fmt.Errorf("load diff accounts: %v", err)
@@ -233,12 +237,9 @@ func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) {
 				set[h] = nil
 			}
 		}
-		if entry.Incomplete {
-			incomplete[entry.Account] = struct{}{}
-		}
 		storages[entry.Account] = set
 	}
-	return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r)
+	return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages)), r)
 }
 
 // journal implements the layer interface, marshaling the un-flushed trie nodes
@@ -316,9 +317,6 @@ func (dl *diffLayer) journal(w io.Writer) error {
 	storage := make([]journalStorage, 0, len(dl.states.Storages))
 	for addr, slots := range dl.states.Storages {
 		entry := journalStorage{Account: addr}
-		if _, ok := dl.states.Incomplete[addr]; ok {
-			entry.Incomplete = true
-		}
 		for slotHash, slot := range slots {
 			entry.Hashes = append(entry.Hashes, slotHash)
 			entry.Slots = append(entry.Slots, slot)