@@ -34,9 +34,11 @@ import (
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/console/prompt"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/olekukonko/tablewriter"
	"gopkg.in/urfave/cli.v1"
)
@@ -69,6 +71,7 @@ Remove blockchain and state databases`,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
		},
	}
	dbInspectCmd = cli.Command{
@@ -233,6 +236,21 @@ WARNING: This is a low-level operation which may cause database corruption!`,
		},
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	dbMetadataCmd = cli.Command{
		Action: utils.MigrateFlags(showMetaData),
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "Shows metadata about the chain status.",
	}
)

func removeDB(ctx *cli.Context) error {
@@ -685,3 +703,50 @@ func exportChaindata(ctx *cli.Context) error {
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
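
// showMetaData prints out relevant metadata about the chain status, read
// directly from the chain database and the freezer.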
func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
	}
	// pp pretty-prints an optional uint64 as decimal plus hex, or "<nil>" if unset.
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (0x%x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
	}
	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
	}...)
	// Render the collected key/value pairs as a two-column table on stdout.
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}
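
With this change, the new subcommand should be reachable as `geth db metadata` (alongside the existing `db` subcommands) and prints the table above for the datadir/network selected by the usual flags.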