mirror of https://github.com/ethereum/go-ethereum
core, trie, triedb: minor changes from snapshot integration (#30599)
This change ports some non-important changes from https://github.com/ethereum/go-ethereum/pull/30159, including interface renaming and some trivial refactorings.pull/30628/head
parent
3ff73d46b3
commit
b6c62d5887
@ -1,53 +0,0 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package triestate |
||||
|
||||
import "github.com/ethereum/go-ethereum/common" |
||||
|
||||
// Set represents a collection of mutated states during a state transition.
// The value refers to the original content of state before the transition
// is made. Nil means that the state was not present previously.
type Set struct {
	Accounts map[common.Address][]byte                 // Mutated account set, nil means the account was not present
	Storages map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
	size     common.StorageSize                        // Approximate size of set, computed lazily by Size()
}
||||
|
||||
// New constructs the state set with provided data.
|
||||
func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte) *Set { |
||||
return &Set{ |
||||
Accounts: accounts, |
||||
Storages: storages, |
||||
} |
||||
} |
||||
|
||||
// Size returns the approximate memory size occupied by the set.
|
||||
func (s *Set) Size() common.StorageSize { |
||||
if s.size != 0 { |
||||
return s.size |
||||
} |
||||
for _, account := range s.Accounts { |
||||
s.size += common.StorageSize(common.AddressLength + len(account)) |
||||
} |
||||
for _, slots := range s.Storages { |
||||
for _, val := range slots { |
||||
s.size += common.StorageSize(common.HashLength + len(val)) |
||||
} |
||||
s.size += common.StorageSize(common.AddressLength) |
||||
} |
||||
return s.size |
||||
} |
@ -0,0 +1,141 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pathdb |
||||
|
||||
import ( |
||||
"fmt" |
||||
"time" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// buffer is a collection of modified states along with the modified trie nodes.
// They are cached here to aggregate the disk write. The content of the buffer
// must be checked before diving into disk (since it basically is not yet written
// data).
type buffer struct {
	layers uint64   // The number of diff layers aggregated inside
	limit  uint64   // The maximum memory allowance in bytes
	nodes  *nodeSet // Aggregated trie node set
}
||||
|
||||
// newBuffer initializes the buffer with the provided states and trie nodes.
|
||||
func newBuffer(limit int, nodes *nodeSet, layers uint64) *buffer { |
||||
// Don't panic for lazy users if any provided set is nil
|
||||
if nodes == nil { |
||||
nodes = newNodeSet(nil) |
||||
} |
||||
return &buffer{ |
||||
layers: layers, |
||||
limit: uint64(limit), |
||||
nodes: nodes, |
||||
} |
||||
} |
||||
|
||||
// node retrieves the trie node with node path and its trie identifier.
|
||||
func (b *buffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) { |
||||
return b.nodes.node(owner, path) |
||||
} |
||||
|
||||
// commit merges the provided states and trie nodes into the buffer.
|
||||
func (b *buffer) commit(nodes *nodeSet) *buffer { |
||||
b.layers++ |
||||
b.nodes.merge(nodes) |
||||
return b |
||||
} |
||||
|
||||
// revert is the reverse operation of commit. It also merges the provided states
// and trie nodes into the buffer. The key difference is that the provided state
// set should reverse the changes made by the most recent state transition.
func (b *buffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
	// Short circuit if no embedded state transition to revert
	if b.layers == 0 {
		return errStateUnrecoverable
	}
	b.layers--

	// If only a single transition was left, the whole buffer content is the
	// product of that transition, so dropping everything is equivalent to
	// (and cheaper than) applying the reverse diff.
	if b.layers == 0 {
		b.reset()
		return nil
	}
	// Otherwise overwrite the buffered nodes with their pre-transition
	// content, reading from disk for nodes absent in the buffer.
	b.nodes.revert(db, nodes)
	return nil
}
||||
|
||||
// reset cleans up the disk cache.
|
||||
func (b *buffer) reset() { |
||||
b.layers = 0 |
||||
b.nodes.reset() |
||||
} |
||||
|
||||
// empty returns an indicator if buffer is empty.
|
||||
func (b *buffer) empty() bool { |
||||
return b.layers == 0 |
||||
} |
||||
|
||||
// full returns an indicator if the size of accumulated content exceeds the
|
||||
// configured threshold.
|
||||
func (b *buffer) full() bool { |
||||
return b.size() > b.limit |
||||
} |
||||
|
||||
// size returns the approximate memory size of the held content.
|
||||
func (b *buffer) size() uint64 { |
||||
return b.nodes.size |
||||
} |
||||
|
||||
// flush persists the in-memory dirty trie node into the disk if the configured
// memory threshold is reached. Note, all data must be written atomically.
func (b *buffer) flush(db ethdb.KeyValueStore, freezer ethdb.AncientWriter, nodesCache *fastcache.Cache, id uint64) error {
	// Ensure the target state id is aligned with the internal counter.
	head := rawdb.ReadPersistentStateID(db)
	if head+b.layers != id {
		return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
	}
	// NOTE(review): the original comment here claimed snapshot generation is
	// terminated, but no such call exists in this function — likely a leftover
	// from the snapshot integration; verify against upstream.
	var (
		start = time.Now()
		batch = db.NewBatchWithSize(b.nodes.dbsize() * 11 / 10) // extra 10% for potential pebble internal stuff
	)
	// Explicitly sync the state freezer, ensuring that all written
	// data is transferred to disk before updating the key-value store.
	if freezer != nil {
		if err := freezer.Sync(); err != nil {
			return err
		}
	}
	// Stage every node mutation plus the new persistent state id into the
	// batch, so they land on disk in one atomic write below.
	nodes := b.nodes.write(batch, nodesCache)
	rawdb.WritePersistentStateID(batch, id)

	// Flush all mutations in a single batch
	size := batch.ValueSize()
	if err := batch.Write(); err != nil {
		return err
	}
	commitBytesMeter.Mark(int64(size))
	commitNodesMeter.Mark(int64(nodes))
	commitTimeTimer.UpdateSince(start)
	// The buffer content is durable now; drop it from memory.
	b.reset()
	log.Debug("Persisted buffer content", "nodes", nodes, "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
@ -0,0 +1,65 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pathdb |
||||
|
||||
import ( |
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// nodeCacheKey constructs the unique key of clean cache. The assumption is held
|
||||
// that zero address does not have any associated storage slots.
|
||||
func nodeCacheKey(owner common.Hash, path []byte) []byte { |
||||
if owner == (common.Hash{}) { |
||||
return path |
||||
} |
||||
return append(owner.Bytes(), path...) |
||||
} |
||||
|
||||
// writeNodes writes the trie nodes into the provided database batch.
|
||||
// Note this function will also inject all the newly written nodes
|
||||
// into clean cache.
|
||||
func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) { |
||||
for owner, subset := range nodes { |
||||
for path, n := range subset { |
||||
if n.IsDeleted() { |
||||
if owner == (common.Hash{}) { |
||||
rawdb.DeleteAccountTrieNode(batch, []byte(path)) |
||||
} else { |
||||
rawdb.DeleteStorageTrieNode(batch, owner, []byte(path)) |
||||
} |
||||
if clean != nil { |
||||
clean.Del(nodeCacheKey(owner, []byte(path))) |
||||
} |
||||
} else { |
||||
if owner == (common.Hash{}) { |
||||
rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob) |
||||
} else { |
||||
rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob) |
||||
} |
||||
if clean != nil { |
||||
clean.Set(nodeCacheKey(owner, []byte(path)), n.Blob) |
||||
} |
||||
} |
||||
} |
||||
total += len(subset) |
||||
} |
||||
return total |
||||
} |
@ -1,290 +0,0 @@ |
||||
// Copyright 2022 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package pathdb |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"maps" |
||||
"time" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// nodebuffer is a collection of modified trie nodes to aggregate the disk
// write. The content of the nodebuffer must be checked before diving into
// disk (since it basically is not-yet-written data).
type nodebuffer struct {
	layers uint64                                    // The number of diff layers aggregated inside
	size   uint64                                    // The size of aggregated writes
	limit  uint64                                    // The maximum memory allowance in bytes
	nodes  map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path
}
||||
|
||||
// newNodeBuffer initializes the node buffer with the provided nodes.
|
||||
func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer { |
||||
if nodes == nil { |
||||
nodes = make(map[common.Hash]map[string]*trienode.Node) |
||||
} |
||||
var size uint64 |
||||
for _, subset := range nodes { |
||||
for path, n := range subset { |
||||
size += uint64(len(n.Blob) + len(path)) |
||||
} |
||||
} |
||||
return &nodebuffer{ |
||||
layers: layers, |
||||
nodes: nodes, |
||||
size: size, |
||||
limit: uint64(limit), |
||||
} |
||||
} |
||||
|
||||
// node retrieves the trie node with given node info.
|
||||
func (b *nodebuffer) node(owner common.Hash, path []byte) (*trienode.Node, bool) { |
||||
subset, ok := b.nodes[owner] |
||||
if !ok { |
||||
return nil, false |
||||
} |
||||
n, ok := subset[string(path)] |
||||
if !ok { |
||||
return nil, false |
||||
} |
||||
return n, true |
||||
} |
||||
|
||||
// commit merges the dirty nodes into the nodebuffer. This operation won't take
// the ownership of the nodes map which belongs to the bottom-most diff layer.
// It will just hold the node references from the given map which are safe to
// copy.
func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer {
	var (
		delta         int64 // net size change produced by this merge
		overwrite     int64 // number of nodes replaced by newer versions
		overwriteSize int64 // aggregated size of the replaced versions
	)
	for owner, subset := range nodes {
		current, exist := b.nodes[owner]
		if !exist {
			// Allocate a new map for the subset instead of claiming it directly
			// from the passed map to avoid potential concurrent map read/write.
			// The nodes belong to original diff layer are still accessible even
			// after merging, thus the ownership of nodes map should still belong
			// to original layer and any mutation on it should be prevented.
			for path, n := range subset {
				delta += int64(len(n.Blob) + len(path))
			}
			b.nodes[owner] = maps.Clone(subset)
			continue
		}
		for path, n := range subset {
			if orig, exist := current[path]; !exist {
				// Brand new node: its full footprint is added
				delta += int64(len(n.Blob) + len(path))
			} else {
				// Existing node overwritten: only the blob size difference
				// changes the footprint; the stale version is garbage
				delta += int64(len(n.Blob) - len(orig.Blob))
				overwrite++
				overwriteSize += int64(len(orig.Blob) + len(path))
			}
			current[path] = n
		}
		b.nodes[owner] = current
	}
	b.updateSize(delta)
	b.layers++
	gcNodesMeter.Mark(overwrite)
	gcBytesMeter.Mark(overwriteSize)
	return b
}
||||
|
||||
// revert is the reverse operation of commit. It also merges the provided nodes
// into the nodebuffer, the difference is that the provided node set should
// revert the changes made by the last state transition.
func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error {
	// Short circuit if no embedded state transition to revert.
	if b.layers == 0 {
		return errStateUnrecoverable
	}
	b.layers--

	// Reset the entire buffer if only a single transition left, since the
	// whole content is the product of that transition.
	if b.layers == 0 {
		b.reset()
		return nil
	}
	var delta int64
	for owner, subset := range nodes {
		current, ok := b.nodes[owner]
		if !ok {
			// Reverting a transition that is still in the buffer must find
			// its subset here; anything else indicates corruption.
			panic(fmt.Sprintf("non-existent subset (%x)", owner))
		}
		for path, n := range subset {
			orig, ok := current[path]
			if !ok {
				// There is a special case in MPT that one child is removed from
				// a fullNode which only has two children, and then a new child
				// with different position is immediately inserted into the fullNode.
				// In this case, the clean child of the fullNode will also be
				// marked as dirty because of node collapse and expansion.
				//
				// In case of database rollback, don't panic if this "clean"
				// node occurs which is not present in buffer.
				var blob []byte
				if owner == (common.Hash{}) {
					blob = rawdb.ReadAccountTrieNode(db, []byte(path))
				} else {
					blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
				}
				// Ignore the clean node in the case described above.
				if bytes.Equal(blob, n.Blob) {
					continue
				}
				panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
			}
			current[path] = n
			delta += int64(len(n.Blob)) - int64(len(orig.Blob))
		}
	}
	b.updateSize(delta)
	return nil
}
||||
|
||||
// updateSize updates the total cache size by the given delta.
|
||||
func (b *nodebuffer) updateSize(delta int64) { |
||||
size := int64(b.size) + delta |
||||
if size >= 0 { |
||||
b.size = uint64(size) |
||||
return |
||||
} |
||||
s := b.size |
||||
b.size = 0 |
||||
log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta)) |
||||
} |
||||
|
||||
// reset cleans up the disk cache.
|
||||
func (b *nodebuffer) reset() { |
||||
b.layers = 0 |
||||
b.size = 0 |
||||
b.nodes = make(map[common.Hash]map[string]*trienode.Node) |
||||
} |
||||
|
||||
// empty returns an indicator if nodebuffer contains any state transition inside.
|
||||
func (b *nodebuffer) empty() bool { |
||||
return b.layers == 0 |
||||
} |
||||
|
||||
// setSize sets the buffer size to the provided number, and invokes a flush
// operation if the current memory usage exceeds the new limit.
//
// Note the flush is invoked unconditionally (with force=false); flush itself
// decides whether anything needs persisting based on the new limit.
func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, freezer ethdb.AncientStore, clean *fastcache.Cache, id uint64) error {
	b.limit = uint64(size)
	return b.flush(db, freezer, clean, id, false)
}
||||
|
||||
// allocBatch returns a database batch with pre-allocated buffer.
|
||||
func (b *nodebuffer) allocBatch(db ethdb.KeyValueStore) ethdb.Batch { |
||||
var metasize int |
||||
for owner, nodes := range b.nodes { |
||||
if owner == (common.Hash{}) { |
||||
metasize += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
|
||||
} else { |
||||
metasize += len(nodes) * (len(rawdb.TrieNodeStoragePrefix) + common.HashLength) // database key prefix + owner
|
||||
} |
||||
} |
||||
return db.NewBatchWithSize((metasize + int(b.size)) * 11 / 10) // extra 10% for potential pebble internal stuff
|
||||
} |
||||
|
||||
// flush persists the in-memory dirty trie node into the disk if the configured
// memory threshold is reached. Note, all data must be written atomically.
func (b *nodebuffer) flush(db ethdb.KeyValueStore, freezer ethdb.AncientWriter, clean *fastcache.Cache, id uint64, force bool) error {
	// Skip persisting unless the threshold is exceeded or the caller forces it
	if b.size <= b.limit && !force {
		return nil
	}
	// Ensure the target state id is aligned with the internal counter.
	head := rawdb.ReadPersistentStateID(db)
	if head+b.layers != id {
		return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
	}
	var (
		start = time.Now()
		batch = b.allocBatch(db)
	)
	// Explicitly sync the state freezer, ensuring that all written
	// data is transferred to disk before updating the key-value store.
	if freezer != nil {
		if err := freezer.Sync(); err != nil {
			return err
		}
	}
	// Stage all node mutations plus the new persistent state id into the
	// batch, so they land on disk in one atomic write below.
	nodes := writeNodes(batch, b.nodes, clean)
	rawdb.WritePersistentStateID(batch, id)

	// Flush all mutations in a single batch
	size := batch.ValueSize()
	if err := batch.Write(); err != nil {
		return err
	}
	commitBytesMeter.Mark(int64(size))
	commitNodesMeter.Mark(int64(nodes))
	commitTimeTimer.UpdateSince(start)
	log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
	// The buffer content is durable now; drop it from memory.
	b.reset()
	return nil
}
||||
|
||||
// writeNodes writes the trie nodes into the provided database batch.
|
||||
// Note this function will also inject all the newly written nodes
|
||||
// into clean cache.
|
||||
func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) { |
||||
for owner, subset := range nodes { |
||||
for path, n := range subset { |
||||
if n.IsDeleted() { |
||||
if owner == (common.Hash{}) { |
||||
rawdb.DeleteAccountTrieNode(batch, []byte(path)) |
||||
} else { |
||||
rawdb.DeleteStorageTrieNode(batch, owner, []byte(path)) |
||||
} |
||||
if clean != nil { |
||||
clean.Del(cacheKey(owner, []byte(path))) |
||||
} |
||||
} else { |
||||
if owner == (common.Hash{}) { |
||||
rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob) |
||||
} else { |
||||
rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob) |
||||
} |
||||
if clean != nil { |
||||
clean.Set(cacheKey(owner, []byte(path)), n.Blob) |
||||
} |
||||
} |
||||
} |
||||
total += len(subset) |
||||
} |
||||
return total |
||||
} |
||||
|
||||
// cacheKey constructs the unique key of clean cache.
|
||||
func cacheKey(owner common.Hash, path []byte) []byte { |
||||
if owner == (common.Hash{}) { |
||||
return path |
||||
} |
||||
return append(owner.Bytes(), path...) |
||||
} |
@ -0,0 +1,246 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package pathdb |
||||
|
||||
import ( |
||||
"bytes" |
||||
"fmt" |
||||
"io" |
||||
"maps" |
||||
|
||||
"github.com/VictoriaMetrics/fastcache" |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// nodeSet represents a collection of modified trie nodes resulting from a state
// transition, typically corresponding to a block execution. It can also represent
// the combined trie node set from several aggregated state transitions.
type nodeSet struct {
	size  uint64                                    // aggregated size of the trie node (blob + path + owner prefix)
	nodes map[common.Hash]map[string]*trienode.Node // node set, mapped by owner and path
}
||||
|
||||
// newNodeSet constructs the set with the provided dirty trie nodes.
|
||||
func newNodeSet(nodes map[common.Hash]map[string]*trienode.Node) *nodeSet { |
||||
// Don't panic for the lazy callers, initialize the nil map instead
|
||||
if nodes == nil { |
||||
nodes = make(map[common.Hash]map[string]*trienode.Node) |
||||
} |
||||
s := &nodeSet{nodes: nodes} |
||||
s.computeSize() |
||||
return s |
||||
} |
||||
|
||||
// computeSize calculates the database size of the held trie nodes.
|
||||
func (s *nodeSet) computeSize() { |
||||
var size uint64 |
||||
for owner, subset := range s.nodes { |
||||
var prefix int |
||||
if owner != (common.Hash{}) { |
||||
prefix = common.HashLength // owner (32 bytes) for storage trie nodes
|
||||
} |
||||
for path, n := range subset { |
||||
size += uint64(prefix + len(n.Blob) + len(path)) |
||||
} |
||||
} |
||||
s.size = size |
||||
} |
||||
|
||||
// updateSize updates the total cache size by the given delta.
|
||||
func (s *nodeSet) updateSize(delta int64) { |
||||
size := int64(s.size) + delta |
||||
if size >= 0 { |
||||
s.size = uint64(size) |
||||
return |
||||
} |
||||
log.Error("Nodeset size underflow", "prev", common.StorageSize(s.size), "delta", common.StorageSize(delta)) |
||||
s.size = 0 |
||||
} |
||||
|
||||
// node retrieves the trie node with node path and its trie identifier.
|
||||
func (s *nodeSet) node(owner common.Hash, path []byte) (*trienode.Node, bool) { |
||||
subset, ok := s.nodes[owner] |
||||
if !ok { |
||||
return nil, false |
||||
} |
||||
n, ok := subset[string(path)] |
||||
if !ok { |
||||
return nil, false |
||||
} |
||||
return n, true |
||||
} |
||||
|
||||
// merge integrates the provided dirty nodes into the set. The provided nodeset
// will remain unchanged, as it may still be referenced by other layers.
func (s *nodeSet) merge(set *nodeSet) {
	var (
		delta     int64   // size difference resulting from node merging
		overwrite counter // counter of nodes being overwritten
	)
	for owner, subset := range set.nodes {
		// Storage trie nodes carry a 32-byte owner hash in the database key
		var prefix int
		if owner != (common.Hash{}) {
			prefix = common.HashLength
		}
		current, exist := s.nodes[owner]
		if !exist {
			for path, n := range subset {
				delta += int64(prefix + len(n.Blob) + len(path))
			}
			// Perform a shallow copy of the map for the subset instead of claiming it
			// directly from the provided nodeset to avoid potential concurrent map
			// read/write issues. The nodes belonging to the original diff layer remain
			// accessible even after merging. Therefore, ownership of the nodes map
			// should still belong to the original layer, and any modifications to it
			// should be prevented.
			s.nodes[owner] = maps.Clone(subset)
			continue
		}
		for path, n := range subset {
			if orig, exist := current[path]; !exist {
				// Brand new node: its full footprint is added
				delta += int64(prefix + len(n.Blob) + len(path))
			} else {
				// Existing node overwritten: only the blob size difference
				// changes the footprint; the stale version becomes garbage
				delta += int64(len(n.Blob) - len(orig.Blob))
				overwrite.add(prefix + len(orig.Blob) + len(path))
			}
			current[path] = n
		}
		s.nodes[owner] = current
	}
	overwrite.report(gcTrieNodeMeter, gcTrieNodeBytesMeter)
	s.updateSize(delta)
}
||||
|
||||
// revert merges the provided trie nodes into the set. This should reverse the
// changes made by the most recent state transition.
func (s *nodeSet) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) {
	var delta int64
	for owner, subset := range nodes {
		current, ok := s.nodes[owner]
		if !ok {
			// Reverting a transition still held by the set must find its
			// subset here; anything else indicates corruption.
			panic(fmt.Sprintf("non-existent subset (%x)", owner))
		}
		for path, n := range subset {
			orig, ok := current[path]
			if !ok {
				// There is a special case in merkle tree that one child is removed
				// from a fullNode which only has two children, and then a new child
				// with different position is immediately inserted into the fullNode.
				// In this case, the clean child of the fullNode will also be marked
				// as dirty because of node collapse and expansion. In case of database
				// rollback, don't panic if this "clean" node occurs which is not
				// present in buffer.
				var blob []byte
				if owner == (common.Hash{}) {
					blob = rawdb.ReadAccountTrieNode(db, []byte(path))
				} else {
					blob = rawdb.ReadStorageTrieNode(db, owner, []byte(path))
				}
				// Ignore the clean node in the case described above.
				if bytes.Equal(blob, n.Blob) {
					continue
				}
				panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex()))
			}
			current[path] = n
			delta += int64(len(n.Blob)) - int64(len(orig.Blob))
		}
	}
	s.updateSize(delta)
}
||||
|
||||
// journalNode represents a trie node persisted in the journal.
type journalNode struct {
	Path []byte // Path of the node in the trie
	Blob []byte // RLP-encoded trie node blob, nil means the node is deleted
}
||||
|
||||
// journalNodes represents a list trie nodes belong to a single account
// or the main account trie (zero owner hash).
type journalNodes struct {
	Owner common.Hash   // Trie identifier, zero for the account trie
	Nodes []journalNode // Nodes within the trie, keyed by path
}
||||
|
||||
// encode serializes the content of trie nodes into the provided writer.
|
||||
func (s *nodeSet) encode(w io.Writer) error { |
||||
nodes := make([]journalNodes, 0, len(s.nodes)) |
||||
for owner, subset := range s.nodes { |
||||
entry := journalNodes{Owner: owner} |
||||
for path, node := range subset { |
||||
entry.Nodes = append(entry.Nodes, journalNode{ |
||||
Path: []byte(path), |
||||
Blob: node.Blob, |
||||
}) |
||||
} |
||||
nodes = append(nodes, entry) |
||||
} |
||||
return rlp.Encode(w, nodes) |
||||
} |
||||
|
||||
// decode deserializes the content from the rlp stream into the nodeset.
|
||||
func (s *nodeSet) decode(r *rlp.Stream) error { |
||||
var encoded []journalNodes |
||||
if err := r.Decode(&encoded); err != nil { |
||||
return fmt.Errorf("load nodes: %v", err) |
||||
} |
||||
nodes := make(map[common.Hash]map[string]*trienode.Node) |
||||
for _, entry := range encoded { |
||||
subset := make(map[string]*trienode.Node) |
||||
for _, n := range entry.Nodes { |
||||
if len(n.Blob) > 0 { |
||||
subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob) |
||||
} else { |
||||
subset[string(n.Path)] = trienode.NewDeleted() |
||||
} |
||||
} |
||||
nodes[entry.Owner] = subset |
||||
} |
||||
s.nodes = nodes |
||||
s.computeSize() |
||||
return nil |
||||
} |
||||
|
||||
// write flushes nodes into the provided database batch as a whole.
|
||||
func (s *nodeSet) write(batch ethdb.Batch, clean *fastcache.Cache) int { |
||||
return writeNodes(batch, s.nodes, clean) |
||||
} |
||||
|
||||
// reset clears all cached trie node data.
|
||||
func (s *nodeSet) reset() { |
||||
s.nodes = make(map[common.Hash]map[string]*trienode.Node) |
||||
s.size = 0 |
||||
} |
||||
|
||||
// dbsize returns the approximate size of db write.
|
||||
func (s *nodeSet) dbsize() int { |
||||
var m int |
||||
for owner, nodes := range s.nodes { |
||||
if owner == (common.Hash{}) { |
||||
m += len(nodes) * len(rawdb.TrieNodeAccountPrefix) // database key prefix
|
||||
} else { |
||||
m += len(nodes) * (len(rawdb.TrieNodeStoragePrefix)) // database key prefix
|
||||
} |
||||
} |
||||
return m + int(s.size) |
||||
} |
@ -0,0 +1,166 @@ |
||||
// Copyright 2024 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package pathdb |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/metrics" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// counter helps in tracking items and their corresponding sizes.
type counter struct {
	n    int // number of items accumulated
	size int // cumulative size of the accumulated items, in bytes
}
||||
|
||||
// add size to the counter and increase the item counter.
|
||||
func (c *counter) add(size int) { |
||||
c.n++ |
||||
c.size += size |
||||
} |
||||
|
||||
// report uploads the cached statistics to meters.
|
||||
func (c *counter) report(count metrics.Meter, size metrics.Meter) { |
||||
count.Mark(int64(c.n)) |
||||
size.Mark(int64(c.size)) |
||||
} |
||||
|
||||
// StateSetWithOrigin wraps the state set with additional original values of the
// mutated states.
type StateSetWithOrigin struct {
	// accountOrigin represents the account data before the state transition,
	// corresponding to both the accountData and destructSet. It's keyed by the
	// account address. The nil value means the account was not present before.
	accountOrigin map[common.Address][]byte

	// storageOrigin represents the storage data before the state transition,
	// corresponding to storageData and deleted slots of destructSet. It's keyed
	// by the account address and slot key hash. The nil value means the slot was
	// not present.
	storageOrigin map[common.Address]map[common.Hash][]byte

	// size is the approximate memory size of the state data held above
	// (accountOrigin and storageOrigin), as computed at construction time.
	size uint64
}
||||
|
||||
// NewStateSetWithOrigin constructs the state set with the provided data.
|
||||
func NewStateSetWithOrigin(accountOrigin map[common.Address][]byte, storageOrigin map[common.Address]map[common.Hash][]byte) *StateSetWithOrigin { |
||||
// Don't panic for the lazy callers, initialize the nil maps instead.
|
||||
if accountOrigin == nil { |
||||
accountOrigin = make(map[common.Address][]byte) |
||||
} |
||||
if storageOrigin == nil { |
||||
storageOrigin = make(map[common.Address]map[common.Hash][]byte) |
||||
} |
||||
// Count the memory size occupied by the set. Note that each slot key here
|
||||
// uses 2*common.HashLength to keep consistent with the calculation method
|
||||
// of stateSet.
|
||||
var size int |
||||
for _, data := range accountOrigin { |
||||
size += common.HashLength + len(data) |
||||
} |
||||
for _, slots := range storageOrigin { |
||||
for _, data := range slots { |
||||
size += 2*common.HashLength + len(data) |
||||
} |
||||
} |
||||
return &StateSetWithOrigin{ |
||||
accountOrigin: accountOrigin, |
||||
storageOrigin: storageOrigin, |
||||
size: uint64(size), |
||||
} |
||||
} |
||||
|
||||
// encode serializes the content of state set into the provided writer.
|
||||
func (s *StateSetWithOrigin) encode(w io.Writer) error { |
||||
// Encode accounts
|
||||
type Accounts struct { |
||||
Addresses []common.Address |
||||
Accounts [][]byte |
||||
} |
||||
var accounts Accounts |
||||
for address, blob := range s.accountOrigin { |
||||
accounts.Addresses = append(accounts.Addresses, address) |
||||
accounts.Accounts = append(accounts.Accounts, blob) |
||||
} |
||||
if err := rlp.Encode(w, accounts); err != nil { |
||||
return err |
||||
} |
||||
// Encode storages
|
||||
type Storage struct { |
||||
Address common.Address |
||||
Keys []common.Hash |
||||
Blobs [][]byte |
||||
} |
||||
storages := make([]Storage, 0, len(s.storageOrigin)) |
||||
for address, slots := range s.storageOrigin { |
||||
keys := make([]common.Hash, 0, len(slots)) |
||||
vals := make([][]byte, 0, len(slots)) |
||||
for key, val := range slots { |
||||
keys = append(keys, key) |
||||
vals = append(vals, val) |
||||
} |
||||
storages = append(storages, Storage{Address: address, Keys: keys, Blobs: vals}) |
||||
} |
||||
return rlp.Encode(w, storages) |
||||
} |
||||
|
||||
// decode deserializes the content from the rlp stream into the state set.
|
||||
func (s *StateSetWithOrigin) decode(r *rlp.Stream) error { |
||||
// Decode account origin
|
||||
type Accounts struct { |
||||
Addresses []common.Address |
||||
Accounts [][]byte |
||||
} |
||||
var ( |
||||
accounts Accounts |
||||
accountSet = make(map[common.Address][]byte) |
||||
) |
||||
if err := r.Decode(&accounts); err != nil { |
||||
return fmt.Errorf("load diff account origin set: %v", err) |
||||
} |
||||
for i := 0; i < len(accounts.Accounts); i++ { |
||||
accountSet[accounts.Addresses[i]] = accounts.Accounts[i] |
||||
} |
||||
s.accountOrigin = accountSet |
||||
|
||||
// Decode storage origin
|
||||
type Storage struct { |
||||
Address common.Address |
||||
Keys []common.Hash |
||||
Blobs [][]byte |
||||
} |
||||
var ( |
||||
storages []Storage |
||||
storageSet = make(map[common.Address]map[common.Hash][]byte) |
||||
) |
||||
if err := r.Decode(&storages); err != nil { |
||||
return fmt.Errorf("load diff storage origin: %v", err) |
||||
} |
||||
for _, storage := range storages { |
||||
storageSet[storage.Address] = make(map[common.Hash][]byte) |
||||
for i := 0; i < len(storage.Keys); i++ { |
||||
storageSet[storage.Address][storage.Keys[i]] = storage.Blobs[i] |
||||
} |
||||
} |
||||
s.storageOrigin = storageSet |
||||
return nil |
||||
} |
@ -0,0 +1,51 @@ |
||||
// Copyright 2023 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>
|
||||
|
||||
package triedb |
||||
|
||||
import ( |
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/triedb/pathdb" |
||||
) |
||||
|
||||
// StateSet represents a collection of mutated states during a state transition.
type StateSet struct {
	Destructs      map[common.Hash]struct{}                  // Destructed accounts, keyed by address hash
	Accounts       map[common.Hash][]byte                    // Mutated accounts in 'slim RLP' encoding, keyed by address hash
	AccountsOrigin map[common.Address][]byte                 // Original values of mutated accounts in 'slim RLP' encoding, keyed by address
	Storages       map[common.Hash]map[common.Hash][]byte    // Mutated storage slots in 'prefix-zero-trimmed' RLP format, keyed by address hash
	StoragesOrigin map[common.Address]map[common.Hash][]byte // Original values of mutated storage slots in 'prefix-zero-trimmed' RLP format, keyed by address
}
||||
|
||||
// NewStateSet initializes an empty state set.
|
||||
func NewStateSet() *StateSet { |
||||
return &StateSet{ |
||||
Destructs: make(map[common.Hash]struct{}), |
||||
Accounts: make(map[common.Hash][]byte), |
||||
AccountsOrigin: make(map[common.Address][]byte), |
||||
Storages: make(map[common.Hash]map[common.Hash][]byte), |
||||
StoragesOrigin: make(map[common.Address]map[common.Hash][]byte), |
||||
} |
||||
} |
||||
|
||||
// internal returns a state set for path database internal usage.
|
||||
func (set *StateSet) internal() *pathdb.StateSetWithOrigin { |
||||
// the nil state set is possible in tests.
|
||||
if set == nil { |
||||
return nil |
||||
} |
||||
return pathdb.NewStateSetWithOrigin(set.AccountsOrigin, set.StoragesOrigin) |
||||
} |
Loading…
Reference in new issue