cmd/geth, eth, core: snapshot dump + unify with trie dump (#22795)

* cmd/geth, eth, core: snapshot dump + unify with trie dump

* cmd/evm: dump API fixes

* cmd/geth, core, eth: fix some remaining errors

* cmd/evm: dump - add limit, support address startkey, address review concerns

* cmd, core/state, eth: minor polishes, fix snap dump crash, unify format

Co-authored-by: Péter Szilágyi <peterke@gmail.com>
commit addd8824cf  (parent 1cca781a02, branch pull/22870/head)
Author: Martin Holst Swende, committed by GitHub
10 changed files (lines changed per file):

 cmd/evm/internal/t8ntool/transition.go |   7
 cmd/evm/runner.go                      |   2
 cmd/evm/staterunner.go                 |  10
 cmd/geth/chaincmd.go                   | 100
 cmd/geth/snapshot.go                   |  97
 cmd/utils/flags.go                     |  12
 core/state/dump.go                     |  76
 core/state/state_test.go               |  19
 eth/api.go                             |  19
 eth/api_test.go                        |  21

cmd/evm/internal/t8ntool/transition.go
@@ -212,16 +212,15 @@ func Main(ctx *cli.Context) error {
     // Iterate over all the tests, run them and aggregate the results
     // Run the test and aggregate the result
-    state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
+    s, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer)
     if err != nil {
         return err
     }
     body, _ := rlp.EncodeToBytes(txs)
     // Dump the excution result
     collector := make(Alloc)
-    state.DumpToCollector(collector, false, false, false, nil, -1)
+    s.DumpToCollector(collector, nil)
     return dispatchOutput(ctx, baseDir, result, collector, body)
 }

 // txWithKey is a helper-struct, to allow us to use the types.Transaction along with
@@ -303,7 +302,7 @@ func (g Alloc) OnAccount(addr common.Address, dumpAccount state.DumpAccount) {
         }
     }
     genesisAccount := core.GenesisAccount{
-        Code:    common.FromHex(dumpAccount.Code),
+        Code:    dumpAccount.Code,
         Storage: storage,
         Balance: balance,
         Nonce:   dumpAccount.Nonce,
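The Alloc collector above already satisfies the DumpCollector interface that DumpToCollector drives (OnRoot plus OnAccount; the interface appears in the core/state/dump.go hunks further down). As a purely illustrative sketch, not part of this change, another collector could look like this:

// Hypothetical example: a DumpCollector that only tallies accounts and their
// combined balance instead of retaining the full dump in memory.
package example

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
)

type accountCounter struct {
	root     common.Hash
	accounts uint64
	total    *big.Int
}

// OnRoot is called once with the state root before iteration starts.
func (c *accountCounter) OnRoot(root common.Hash) { c.root = root }

// OnAccount is called for every account visited by DumpToCollector.
func (c *accountCounter) OnAccount(addr common.Address, acc state.DumpAccount) {
	c.accounts++
	if c.total == nil {
		c.total = new(big.Int)
	}
	// Balance is a decimal string (see DumpAccount below), so parse base 10.
	if b, ok := new(big.Int).SetString(acc.Balance, 10); ok {
		c.total.Add(c.total, b)
	}
}

With the new signature, s.DumpToCollector(collector, nil) walks the full state with default options, since a nil config is sanitized into an empty DumpConfig.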

cmd/evm/runner.go
@@ -270,7 +270,7 @@ func runCmd(ctx *cli.Context) error {
     if ctx.GlobalBool(DumpFlag.Name) {
         statedb.Commit(true)
         statedb.IntermediateRoot(true)
-        fmt.Println(string(statedb.Dump(false, false, true)))
+        fmt.Println(string(statedb.Dump(nil)))
     }
     if memProfilePath := ctx.GlobalString(MemProfileFlag.Name); memProfilePath != "" {

cmd/evm/staterunner.go
@@ -98,16 +98,16 @@ func stateTestCmd(ctx *cli.Context) error {
     for _, st := range test.Subtests() {
         // Run the test and aggregate the result
         result := &StatetestResult{Name: key, Fork: st.Fork, Pass: true}
-        _, state, err := test.Run(st, cfg, false)
+        _, s, err := test.Run(st, cfg, false)
         // print state root for evmlab tracing
-        if ctx.GlobalBool(MachineFlag.Name) && state != nil {
-            fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", state.IntermediateRoot(false))
+        if ctx.GlobalBool(MachineFlag.Name) && s != nil {
+            fmt.Fprintf(os.Stderr, "{\"stateRoot\": \"%x\"}\n", s.IntermediateRoot(false))
         }
         if err != nil {
             // Test failed, mark as so and dump any state to aid debugging
             result.Pass, result.Error = false, err.Error()
-            if ctx.GlobalBool(DumpFlag.Name) && state != nil {
-                dump := state.RawDump(false, false, true)
+            if ctx.GlobalBool(DumpFlag.Name) && s != nil {
+                dump := s.RawDump(nil)
                 result.State = &dump
             }
         }

cmd/geth/chaincmd.go
@@ -18,6 +18,7 @@ package main
 import (
     "encoding/json"
+    "errors"
     "fmt"
     "os"
     "runtime"
@@ -27,12 +28,16 @@ import (
     "github.com/ethereum/go-ethereum/cmd/utils"
     "github.com/ethereum/go-ethereum/common"
+    "github.com/ethereum/go-ethereum/common/hexutil"
     "github.com/ethereum/go-ethereum/core"
     "github.com/ethereum/go-ethereum/core/rawdb"
     "github.com/ethereum/go-ethereum/core/state"
     "github.com/ethereum/go-ethereum/core/types"
+    "github.com/ethereum/go-ethereum/crypto"
+    "github.com/ethereum/go-ethereum/ethdb"
     "github.com/ethereum/go-ethereum/log"
     "github.com/ethereum/go-ethereum/metrics"
+    "github.com/ethereum/go-ethereum/node"
     "gopkg.in/urfave/cli.v1"
 )
@@ -152,20 +157,21 @@ The export-preimages command export hash preimages to an RLP encoded stream`,
         Action:    utils.MigrateFlags(dump),
         Name:      "dump",
         Usage:     "Dump a specific block from storage",
-        ArgsUsage: "[<blockHash> | <blockNum>]...",
+        ArgsUsage: "[? <blockHash> | <blockNum>]",
         Flags: []cli.Flag{
             utils.DataDirFlag,
             utils.CacheFlag,
-            utils.SyncModeFlag,
             utils.IterativeOutputFlag,
             utils.ExcludeCodeFlag,
             utils.ExcludeStorageFlag,
             utils.IncludeIncompletesFlag,
+            utils.StartKeyFlag,
+            utils.DumpLimitFlag,
         },
         Category: "BLOCKCHAIN COMMANDS",
         Description: `
-The arguments are interpreted as block numbers or hashes.
-Use "ethereum dump 0" to dump the genesis block.`,
+This command dumps out the state for a given block (or latest, if none provided).
+`,
     }
 )
@@ -373,47 +379,85 @@ func exportPreimages(ctx *cli.Context) error {
     return nil
 }

-func dump(ctx *cli.Context) error {
-    stack, _ := makeConfigNode(ctx)
-    defer stack.Close()
-
+func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
     db := utils.MakeChainDatabase(ctx, stack, true)
-    for _, arg := range ctx.Args() {
-        var header *types.Header
+    var header *types.Header
+    if ctx.NArg() > 1 {
+        return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
+    }
+    if ctx.NArg() == 1 {
+        arg := ctx.Args().First()
         if hashish(arg) {
             hash := common.HexToHash(arg)
-            number := rawdb.ReadHeaderNumber(db, hash)
-            if number != nil {
+            if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
                 header = rawdb.ReadHeader(db, hash, *number)
+            } else {
+                return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
             }
         } else {
-            number, _ := strconv.Atoi(arg)
-            hash := rawdb.ReadCanonicalHash(db, uint64(number))
-            if hash != (common.Hash{}) {
+            number, err := strconv.Atoi(arg)
+            if err != nil {
+                return nil, nil, common.Hash{}, err
+            }
+            if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
                 header = rawdb.ReadHeader(db, hash, uint64(number))
+            } else {
+                return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
             }
         }
-        if header == nil {
-            fmt.Println("{}")
-            utils.Fatalf("block not found")
-        } else {
-            state, err := state.New(header.Root, state.NewDatabase(db), nil)
-            if err != nil {
-                utils.Fatalf("could not create new state: %v", err)
-            }
-            excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
-            excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
-            includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
-            if ctx.Bool(utils.IterativeOutputFlag.Name) {
-                state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
-            } else {
-                if includeMissing {
-                    fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
-                        " otherwise the accounts will overwrite each other in the resulting mapping.")
-                }
-                fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
-            }
-        }
+    } else {
+        // Use latest
+        header = rawdb.ReadHeadHeader(db)
+    }
+    if header == nil {
+        return nil, nil, common.Hash{}, errors.New("no head block found")
+    }
+    startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
+    var start common.Hash
+    switch len(startArg) {
+    case 0: // common.Hash
+    case 32:
+        start = common.BytesToHash(startArg)
+    case 20:
+        start = crypto.Keccak256Hash(startArg)
+        log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
+    default:
+        return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
+    }
+    var conf = &state.DumpConfig{
+        SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
+        SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
+        OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
+        Start:             start.Bytes(),
+        Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
+    }
+    log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
+        "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
+        "start", hexutil.Encode(conf.Start), "limit", conf.Max)
+    return conf, db, header.Root, nil
+}
+
+func dump(ctx *cli.Context) error {
+    stack, _ := makeConfigNode(ctx)
+    defer stack.Close()
+
+    conf, db, root, err := parseDumpConfig(ctx, stack)
+    if err != nil {
+        return err
+    }
+    state, err := state.New(root, state.NewDatabase(db), nil)
+    if err != nil {
+        return err
+    }
+    if ctx.Bool(utils.IterativeOutputFlag.Name) {
+        state.IterativeDump(conf, json.NewEncoder(os.Stdout))
+    } else {
+        if conf.OnlyWithAddresses {
+            fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
+                " otherwise the accounts will overwrite each other in the resulting mapping.")
+            return fmt.Errorf("incompatible options")
+        }
+        fmt.Println(string(state.Dump(conf)))
     }
     return nil
 }
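Taken together with the new --start and --limit flags, the reworked command can be driven roughly as follows. The flag names come from cmd/utils/flags.go; the block number and start address below are placeholders, for illustration only:

# Stream the state at block 1234000 as newline-delimited JSON (iterative
# output is now the default, since the flag became a BoolT flag), starting
# at a given account and stopping after 1000 accounts
geth dump --start 0x0000000000000000000000000000000000000001 --limit 1000 1234000

# Skip contract code and storage to keep the dump small
geth dump --nocode --nostorage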

cmd/geth/snapshot.go
@@ -18,7 +18,9 @@ package main
 import (
     "bytes"
+    "encoding/json"
     "errors"
+    "os"
     "time"

     "github.com/ethereum/go-ethereum/cmd/utils"
@@ -142,6 +144,31 @@ verification. The default checking target is the HEAD state. It's basically iden
 to traverse-state, but the check granularity is smaller.

 It's also usable without snapshot enabled.
 `,
             },
+            {
+                Name:      "dump",
+                Usage:     "Dump a specific block from storage (same as 'geth dump' but using snapshots)",
+                ArgsUsage: "[? <blockHash> | <blockNum>]",
+                Action:    utils.MigrateFlags(dumpState),
+                Category:  "MISCELLANEOUS COMMANDS",
+                Flags: []cli.Flag{
+                    utils.DataDirFlag,
+                    utils.AncientFlag,
+                    utils.RopstenFlag,
+                    utils.RinkebyFlag,
+                    utils.GoerliFlag,
+                    utils.ExcludeCodeFlag,
+                    utils.ExcludeStorageFlag,
+                    utils.StartKeyFlag,
+                    utils.DumpLimitFlag,
+                },
+                Description: `
+This command is semantically equivalent to 'geth dump', but uses the snapshots
+as the backend data source, making this command a lot faster.
+
+The argument is interpreted as block number or hash. If none is provided, the latest
+block is used.
+`,
+            },
         },
     },
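An illustrative invocation of the new subcommand (the address is a placeholder); the dumpState implementation backing it follows in the next hunk:

# Dump the latest state straight from the snapshot layers
geth snapshot dump

# Bounded dump: skip storage, start at a given account, stop after 1000 accounts
geth snapshot dump --nostorage --start 0x0000000000000000000000000000000000000001 --limit 1000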
@@ -430,3 +457,73 @@ func parseRoot(input string) (common.Hash, error) {
     }
     return h, nil
 }
+
+func dumpState(ctx *cli.Context) error {
+    stack, _ := makeConfigNode(ctx)
+    defer stack.Close()
+
+    conf, db, root, err := parseDumpConfig(ctx, stack)
+    if err != nil {
+        return err
+    }
+    snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, root, false, false, false)
+    if err != nil {
+        return err
+    }
+    accIt, err := snaptree.AccountIterator(root, common.BytesToHash(conf.Start))
+    if err != nil {
+        return err
+    }
+    defer accIt.Release()
+    log.Info("Snapshot dumping started", "root", root)
+    var (
+        start    = time.Now()
+        logged   = time.Now()
+        accounts uint64
+    )
+    enc := json.NewEncoder(os.Stdout)
+    enc.Encode(struct {
+        Root common.Hash `json:"root"`
+    }{root})
+    for accIt.Next() {
+        account, err := snapshot.FullAccount(accIt.Account())
+        if err != nil {
+            return err
+        }
+        da := &state.DumpAccount{
+            Balance:   account.Balance.String(),
+            Nonce:     account.Nonce,
+            Root:      account.Root,
+            CodeHash:  account.CodeHash,
+            SecureKey: accIt.Hash().Bytes(),
+        }
+        if !conf.SkipCode && !bytes.Equal(account.CodeHash, emptyCode) {
+            da.Code = rawdb.ReadCode(db, common.BytesToHash(account.CodeHash))
+        }
+        if !conf.SkipStorage {
+            da.Storage = make(map[common.Hash]string)
+
+            stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
+            if err != nil {
+                return err
+            }
+            for stIt.Next() {
+                da.Storage[stIt.Hash()] = common.Bytes2Hex(stIt.Slot())
+            }
+        }
+        enc.Encode(da)
+        accounts++
+        if time.Since(logged) > 8*time.Second {
+            log.Info("Snapshot dumping in progress", "at", accIt.Hash(), "accounts", accounts,
+                "elapsed", common.PrettyDuration(time.Since(start)))
+            logged = time.Now()
+        }
+        if conf.Max > 0 && accounts >= conf.Max {
+            break
+        }
+    }
+    log.Info("Snapshot dumping complete", "accounts", accounts,
+        "elapsed", common.PrettyDuration(time.Since(start)))
+    return nil
+}
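For reference, dumpState writes newline-delimited JSON: first an object carrying the state root, then one DumpAccount object per account. The lines below are illustrative only, with values borrowed from the updated TestDump fixture further down in this diff:

{"root":"0x71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2"}
{"balance":"22","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","key":"0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"}
{"balance":"44","nonce":0,"root":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","codeHash":"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470","key":"0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"}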

cmd/utils/flags.go
@@ -184,7 +184,7 @@ var (
         Name:  "exitwhensynced",
         Usage: "Exits after block synchronisation completes",
     }
-    IterativeOutputFlag = cli.BoolFlag{
+    IterativeOutputFlag = cli.BoolTFlag{
         Name:  "iterative",
         Usage: "Print streaming JSON iteratively, delimited by newlines",
     }
@@ -200,6 +200,16 @@ var (
         Name:  "nocode",
         Usage: "Exclude contract code (save db lookups)",
     }
+    StartKeyFlag = cli.StringFlag{
+        Name:  "start",
+        Usage: "Start position. Either a hash or address",
+        Value: "0x0000000000000000000000000000000000000000000000000000000000000000",
+    }
+    DumpLimitFlag = cli.Uint64Flag{
+        Name:  "limit",
+        Usage: "Max number of elements (0 = no limit)",
+        Value: 0,
+    }
     defaultSyncMode = ethconfig.Defaults.SyncMode
     SyncModeFlag    = TextMarshalerFlag{
         Name:  "syncmode",

core/state/dump.go
@@ -19,6 +19,7 @@ package state
 import (
     "encoding/json"
     "fmt"
+    "time"

     "github.com/ethereum/go-ethereum/common"
     "github.com/ethereum/go-ethereum/common/hexutil"
@@ -27,6 +28,16 @@ import (
     "github.com/ethereum/go-ethereum/trie"
 )

+// DumpConfig is a set of options to control what portions of the state will be
+// iterated and collected.
+type DumpConfig struct {
+    SkipCode          bool
+    SkipStorage       bool
+    OnlyWithAddresses bool
+    Start             []byte
+    Max               uint64
+}
+
 // DumpCollector interface which the state trie calls during iteration
 type DumpCollector interface {
     // OnRoot is called with the state root
@@ -39,9 +50,9 @@ type DumpCollector interface {
 type DumpAccount struct {
     Balance   string                 `json:"balance"`
     Nonce     uint64                 `json:"nonce"`
-    Root      string                 `json:"root"`
-    CodeHash  string                 `json:"codeHash"`
-    Code      string                 `json:"code,omitempty"`
+    Root      hexutil.Bytes          `json:"root"`
+    CodeHash  hexutil.Bytes          `json:"codeHash"`
+    Code      hexutil.Bytes          `json:"code,omitempty"`
     Storage   map[common.Hash]string `json:"storage,omitempty"`
     Address   *common.Address        `json:"address,omitempty"` // Address only present in iterative (line-by-line) mode
     SecureKey hexutil.Bytes          `json:"key,omitempty"`     // If we don't have address, we can output the key
@@ -111,12 +122,23 @@ func (d iterativeDump) OnRoot(root common.Hash) {
     }{root})
 }

-func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) (nextKey []byte) {
-    missingPreimages := 0
+// DumpToCollector iterates the state according to the given options and inserts
+// the items into a collector for aggregation or serialization.
+func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey []byte) {
+    // Sanitize the input to allow nil configs
+    if conf == nil {
+        conf = new(DumpConfig)
+    }
+    var (
+        missingPreimages int
+        accounts         uint64
+        start            = time.Now()
+        logged           = time.Now()
+    )
+    log.Info("Trie dumping started", "root", s.trie.Hash())
     c.OnRoot(s.trie.Hash())

-    var count int
-    it := trie.NewIterator(s.trie.NodeIterator(start))
+    it := trie.NewIterator(s.trie.NodeIterator(conf.Start))
     for it.Next() {
         var data Account
         if err := rlp.DecodeBytes(it.Value, &data); err != nil {
@@ -125,24 +147,25 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
         account := DumpAccount{
             Balance:   data.Balance.String(),
             Nonce:     data.Nonce,
-            Root:      common.Bytes2Hex(data.Root[:]),
-            CodeHash:  common.Bytes2Hex(data.CodeHash),
+            Root:      data.Root[:],
+            CodeHash:  data.CodeHash,
+            SecureKey: it.Key,
         }
         addrBytes := s.trie.GetKey(it.Key)
         if addrBytes == nil {
             // Preimage missing
             missingPreimages++
-            if excludeMissingPreimages {
+            if conf.OnlyWithAddresses {
                 continue
             }
             account.SecureKey = it.Key
         }
         addr := common.BytesToAddress(addrBytes)
         obj := newObject(s, addr, data)
-        if !excludeCode {
-            account.Code = common.Bytes2Hex(obj.Code(s.db))
+        if !conf.SkipCode {
+            account.Code = obj.Code(s.db)
         }
-        if !excludeStorage {
+        if !conf.SkipStorage {
             account.Storage = make(map[common.Hash]string)
             storageIt := trie.NewIterator(obj.getTrie(s.db).NodeIterator(nil))
             for storageIt.Next() {
@@ -155,8 +178,13 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
             }
         }
         c.OnAccount(addr, account)
-        count++
-        if maxResults > 0 && count >= maxResults {
+        accounts++
+        if time.Since(logged) > 8*time.Second {
+            log.Info("Trie dumping in progress", "at", it.Key, "accounts", accounts,
+                "elapsed", common.PrettyDuration(time.Since(start)))
+            logged = time.Now()
+        }
+        if conf.Max > 0 && accounts >= conf.Max {
             if it.Next() {
                 nextKey = it.Key
             }
@@ -166,22 +194,24 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage,
     if missingPreimages > 0 {
         log.Warn("Dump incomplete due to missing preimages", "missing", missingPreimages)
     }
+    log.Info("Trie dumping complete", "accounts", accounts,
+        "elapsed", common.PrettyDuration(time.Since(start)))
+
     return nextKey
 }

 // RawDump returns the entire state an a single large object
-func (s *StateDB) RawDump(excludeCode, excludeStorage, excludeMissingPreimages bool) Dump {
+func (s *StateDB) RawDump(opts *DumpConfig) Dump {
     dump := &Dump{
         Accounts: make(map[common.Address]DumpAccount),
     }
-    s.DumpToCollector(dump, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+    s.DumpToCollector(dump, opts)
     return *dump
 }

 // Dump returns a JSON string representing the entire state as a single json-object
-func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool) []byte {
-    dump := s.RawDump(excludeCode, excludeStorage, excludeMissingPreimages)
+func (s *StateDB) Dump(opts *DumpConfig) []byte {
+    dump := s.RawDump(opts)
     json, err := json.MarshalIndent(dump, "", "    ")
     if err != nil {
         fmt.Println("Dump err", err)
@@ -190,15 +220,15 @@ func (s *StateDB) Dump(excludeCode, excludeStorage, excludeMissingPreimages bool
 }

 // IterativeDump dumps out accounts as json-objects, delimited by linebreaks on stdout
-func (s *StateDB) IterativeDump(excludeCode, excludeStorage, excludeMissingPreimages bool, output *json.Encoder) {
-    s.DumpToCollector(iterativeDump{output}, excludeCode, excludeStorage, excludeMissingPreimages, nil, 0)
+func (s *StateDB) IterativeDump(opts *DumpConfig, output *json.Encoder) {
+    s.DumpToCollector(iterativeDump{output}, opts)
 }

 // IteratorDump dumps out a batch of accounts starts with the given start key
-func (s *StateDB) IteratorDump(excludeCode, excludeStorage, excludeMissingPreimages bool, start []byte, maxResults int) IteratorDump {
+func (s *StateDB) IteratorDump(opts *DumpConfig) IteratorDump {
     iterator := &IteratorDump{
         Accounts: make(map[common.Address]DumpAccount),
     }
-    iterator.Next = s.DumpToCollector(iterator, excludeCode, excludeStorage, excludeMissingPreimages, start, maxResults)
+    iterator.Next = s.DumpToCollector(iterator, opts)
     return *iterator
 }
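To summarize the API change: every dump entry point now takes a single *DumpConfig (nil means "dump everything with defaults") instead of the old positional arguments. A minimal, hypothetical helper showing the new options, not part of this diff:

package example

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core/state"
)

// dumpSlice dumps a bounded slice of the state using the new option struct.
// statedb is assumed to be an already-opened *state.StateDB.
func dumpSlice(statedb *state.StateDB) {
	opts := &state.DumpConfig{
		SkipCode:          true, // equivalent of the old excludeCode=true
		SkipStorage:       false,
		OnlyWithAddresses: true, // skip accounts whose address preimage is missing
		Start:             nil,  // iterate from the beginning of the account trie
		Max:               1000, // stop after 1000 accounts
	}
	fmt.Println(string(statedb.Dump(opts)))                 // one indented JSON object
	statedb.IterativeDump(opts, json.NewEncoder(os.Stdout)) // newline-delimited stream
}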

core/state/state_test.go
@@ -57,28 +57,31 @@ func TestDump(t *testing.T) {
     s.state.Commit(false)

     // check that DumpToCollector contains the state objects that are in trie
-    got := string(s.state.Dump(false, false, true))
+    got := string(s.state.Dump(nil))
     want := `{
     "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2",
     "accounts": {
         "0x0000000000000000000000000000000000000001": {
             "balance": "22",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+            "key": "0x1468288056310c82aa4c01a7e12a10f8111a0560e72b700555479031b86c357d"
         },
         "0x0000000000000000000000000000000000000002": {
             "balance": "44",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470",
+            "key": "0xd52688a8f926c816ca1e079067caba944f158e764817b83fc43594370ca9cf62"
         },
         "0x0000000000000000000000000000000000000102": {
             "balance": "0",
             "nonce": 0,
-            "root": "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-            "codeHash": "87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
-            "code": "03030303030303"
+            "root": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
+            "codeHash": "0x87874902497a5bb968da31a2998d8f22e949d1ef6214bcdedd8bae24cca4b9e3",
+            "code": "0x03030303030303",
+            "key": "0xa17eacbc25cda025e81db9c5c62868822c73ce097cee2a63e33a2e41268358a1"
         }
     }
 }`

eth/api.go
@@ -264,12 +264,16 @@ func NewPublicDebugAPI(eth *Ethereum) *PublicDebugAPI {
 // DumpBlock retrieves the entire state of the database at a given block.
 func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {
+    opts := &state.DumpConfig{
+        OnlyWithAddresses: true,
+        Max:               AccountRangeMaxResults, // Sanity limit over RPC
+    }
     if blockNr == rpc.PendingBlockNumber {
         // If we're dumping the pending state, we need to request
         // both the pending block as well as the pending state from
         // the miner and operate on those
         _, stateDb := api.eth.miner.Pending()
-        return stateDb.RawDump(false, false, true), nil
+        return stateDb.RawDump(opts), nil
     }
     var block *types.Block
     if blockNr == rpc.LatestBlockNumber {
@@ -284,7 +288,7 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error
     if err != nil {
         return state.Dump{}, err
     }
-    return stateDb.RawDump(false, false, true), nil
+    return stateDb.RawDump(opts), nil
 }

 // PrivateDebugAPI is the collection of Ethereum full node APIs exposed over
@@ -386,10 +390,17 @@ func (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, sta
         return state.IteratorDump{}, errors.New("either block number or block hash must be specified")
     }

+    opts := &state.DumpConfig{
+        SkipCode:          nocode,
+        SkipStorage:       nostorage,
+        OnlyWithAddresses: !incompletes,
+        Start:             start,
+        Max:               uint64(maxResults),
+    }
     if maxResults > AccountRangeMaxResults || maxResults <= 0 {
-        maxResults = AccountRangeMaxResults
+        opts.Max = AccountRangeMaxResults
     }
-    return stateDb.IteratorDump(nocode, nostorage, incompletes, start, maxResults), nil
+    return stateDb.IteratorDump(opts), nil
 }

 // StorageRangeResult is the result of a debug_storageRangeAt API call.
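The external debug RPC surface is unchanged by this diff; only the plumbing now goes through state.DumpConfig, and DumpBlock additionally applies the AccountRangeMaxResults sanity limit. Purely for illustration, assuming a local node that exposes the debug namespace over HTTP on the default port, dumping the genesis state could look like:

curl -s -X POST -H 'Content-Type: application/json' \
     --data '{"jsonrpc":"2.0","id":1,"method":"debug_dumpBlock","params":["0x0"]}' \
     http://127.0.0.1:8545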

eth/api_test.go
@@ -34,7 +34,13 @@ import (
 var dumper = spew.ConfigState{Indent: "    "}

 func accountRangeTest(t *testing.T, trie *state.Trie, statedb *state.StateDB, start common.Hash, requestedNum int, expectedNum int) state.IteratorDump {
-    result := statedb.IteratorDump(true, true, false, start.Bytes(), requestedNum)
+    result := statedb.IteratorDump(&state.DumpConfig{
+        SkipCode:          true,
+        SkipStorage:       true,
+        OnlyWithAddresses: false,
+        Start:             start.Bytes(),
+        Max:               uint64(requestedNum),
+    })

     if len(result.Accounts) != expectedNum {
         t.Fatalf("expected %d results, got %d", expectedNum, len(result.Accounts))
@@ -132,11 +138,16 @@ func TestEmptyAccountRange(t *testing.T) {
     var (
         statedb  = state.NewDatabase(rawdb.NewMemoryDatabase())
-        state, _ = state.New(common.Hash{}, statedb, nil)
+        st, _    = state.New(common.Hash{}, statedb, nil)
     )
-    state.Commit(true)
-    state.IntermediateRoot(true)
-    results := state.IteratorDump(true, true, true, (common.Hash{}).Bytes(), AccountRangeMaxResults)
+    st.Commit(true)
+    st.IntermediateRoot(true)
+    results := st.IteratorDump(&state.DumpConfig{
+        SkipCode:          true,
+        SkipStorage:       true,
+        OnlyWithAddresses: true,
+        Max:               uint64(AccountRangeMaxResults),
+    })
     if bytes.Equal(results.Next, (common.Hash{}).Bytes()) {
         t.Fatalf("Empty results should not return a second page")
     }
