trie, triedb: remove unnecessary child resolver interface (#30167)

pull/30104/head^2
rjl493456442 4 months ago committed by GitHub
parent 0d38b0cd34
commit b530d8e455
Changed files:
1. trie/committer.go (8 changed lines)
2. triedb/database.go (10 changed lines)
3. triedb/hashdb/database.go (30 changed lines)

trie/committer.go
@@ -154,12 +154,8 @@ func (c *committer) store(path []byte, n node) node {
 	return hash
 }
 
-// MerkleResolver the children resolver in merkle-patricia-tree.
-type MerkleResolver struct{}
-
-// ForEach implements childResolver, decodes the provided node and
-// traverses the children inside.
-func (resolver MerkleResolver) ForEach(node []byte, onChild func(common.Hash)) {
+// ForGatherChildren decodes the provided node and traverses the children inside.
+func ForGatherChildren(node []byte, onChild func(common.Hash)) {
 	forGatherChildren(mustDecodeNodeUnsafe(nil, node), onChild)
 }
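With the resolver type gone, child traversal is exposed as a plain exported function. A minimal usage sketch (the collectChildren helper is illustrative, not part of this change):

package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/trie"
)

// collectChildren gathers the hashes of every child referenced by an
// RLP-encoded trie node. ForGatherChildren panics on malformed input,
// so the blob should come from a previously committed trie.
func collectChildren(encoded []byte) []common.Hash {
	var children []common.Hash
	trie.ForGatherChildren(encoded, func(child common.Hash) {
		children = append(children, child)
	})
	return children
}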

triedb/database.go
@@ -23,7 +23,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/ethereum/go-ethereum/trie/triestate"
 	"github.com/ethereum/go-ethereum/triedb/database"
@@ -112,14 +111,7 @@ func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
 	if config.PathDB != nil {
 		db.backend = pathdb.New(diskdb, config.PathDB, config.IsVerkle)
 	} else {
-		var resolver hashdb.ChildResolver
-		if config.IsVerkle {
-			// TODO define verkle resolver
-			log.Crit("verkle does not use a hash db")
-		} else {
-			resolver = trie.MerkleResolver{}
-		}
-		db.backend = hashdb.New(diskdb, config.HashDB, resolver)
+		db.backend = hashdb.New(diskdb, config.HashDB)
 	}
 	return db
 }
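Downstream, construction through the public triedb API is unchanged apart from the internals. A rough sketch of opening a hash-scheme database after this change, assuming an in-memory store from rawdb (illustrative, not part of the diff):

package main

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/triedb"
	"github.com/ethereum/go-ethereum/triedb/hashdb"
)

func main() {
	// NewDatabase now builds the hashdb backend without a trie.MerkleResolver;
	// only the disk database and the config are required.
	diskdb := rawdb.NewMemoryDatabase()
	db := triedb.NewDatabase(diskdb, &triedb.Config{HashDB: hashdb.Defaults})
	defer db.Close()
}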

triedb/hashdb/database.go
@@ -31,6 +31,7 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/ethereum/go-ethereum/trie/triestate"
 	"github.com/ethereum/go-ethereum/triedb/database"
@@ -60,12 +61,6 @@ var (
 	memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
 )
 
-// ChildResolver defines the required method to decode the provided
-// trie node and iterate the children on top.
-type ChildResolver interface {
-	ForEach(node []byte, onChild func(common.Hash))
-}
-
 // Config contains the settings for database.
 type Config struct {
 	CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
@@ -84,9 +79,7 @@ var Defaults = &Config{
 // the disk database. The aim is to accumulate trie writes in-memory and only
 // periodically flush a couple tries to disk, garbage collecting the remainder.
 type Database struct {
-	diskdb   ethdb.Database // Persistent storage for matured trie nodes
-	resolver ChildResolver  // The handler to resolve children of nodes
-
+	diskdb  ethdb.Database              // Persistent storage for matured trie nodes
 	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
 	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
 	oldest  common.Hash                 // Oldest tracked node, flush-list head
@@ -124,15 +117,15 @@ var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
 // forChildren invokes the callback for all the tracked children of this node,
 // both the implicit ones from inside the node as well as the explicit ones
 // from outside the node.
-func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
+func (n *cachedNode) forChildren(onChild func(hash common.Hash)) {
 	for child := range n.external {
 		onChild(child)
 	}
-	resolver.ForEach(n.node, onChild)
+	trie.ForGatherChildren(n.node, onChild)
 }
 
 // New initializes the hash-based node database.
-func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
+func New(diskdb ethdb.Database, config *Config) *Database {
 	if config == nil {
 		config = Defaults
 	}
@@ -141,10 +134,9 @@ func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
 		cleans = fastcache.New(config.CleanCacheSize)
 	}
 	return &Database{
-		diskdb:   diskdb,
-		resolver: resolver,
-		cleans:   cleans,
-		dirties:  make(map[common.Hash]*cachedNode),
+		diskdb:  diskdb,
+		cleans:  cleans,
+		dirties: make(map[common.Hash]*cachedNode),
 	}
 }
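For callers constructing the backend directly, the resolver argument simply disappears. A minimal sketch assuming an in-memory disk database (illustrative only):

package example

import (
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/triedb/hashdb"
)

// newBackend constructs the hash-scheme backend with the new two-argument
// constructor; children are now resolved internally via trie.ForGatherChildren.
func newBackend() *hashdb.Database {
	return hashdb.New(rawdb.NewMemoryDatabase(), hashdb.Defaults)
}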
@@ -163,7 +155,7 @@ func (db *Database) insert(hash common.Hash, node []byte) {
 		node:      node,
 		flushPrev: db.newest,
 	}
-	entry.forChildren(db.resolver, func(child common.Hash) {
+	entry.forChildren(func(child common.Hash) {
 		if c := db.dirties[child]; c != nil {
 			c.parents++
 		}
@@ -316,7 +308,7 @@ func (db *Database) dereference(hash common.Hash) {
 		db.dirties[node.flushNext].flushPrev = node.flushPrev
 	}
 	// Dereference all children and delete the node
-	node.forChildren(db.resolver, func(child common.Hash) {
+	node.forChildren(func(child common.Hash) {
 		db.dereference(child)
 	})
 	delete(db.dirties, hash)
@@ -465,7 +457,7 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
 	var err error
 
 	// Dereference all children and delete the node
-	node.forChildren(db.resolver, func(child common.Hash) {
+	node.forChildren(func(child common.Hash) {
 		if err == nil {
 			err = db.commit(child, batch, uncacher)
 		}
