cmd/geth, ethdb/pebble: improve database statistics (#29948)

* cmd/geth, ethdb/pebble: polish method naming and code comments

* implement db stat for pebble

* cmd, core, ethdb, internal, trie: remove db property selector

* cmd, core, ethdb: fix function description

---------

Co-authored-by: prpeh <prpeh@proton.me>
Co-authored-by: Gary Rong <garyrong0905@gmail.com>
pull/30026/head
Ha DANG authored 5 months ago · committed by GitHub
parent 7cf6a63687
commit 67a862db9d
  1. cmd/geth/chaincmd.go — 4 lines changed
  2. cmd/geth/dbcmd.go — 20 lines changed
  3. core/rawdb/table.go — 6 lines changed
  4. ethdb/database.go — 4 lines changed
  5. ethdb/leveldb/leveldb.go — 54 lines changed
  6. ethdb/memorydb/memorydb.go — 6 lines changed
  7. ethdb/pebble/pebble.go — 6 lines changed
  8. ethdb/remotedb/remotedb.go — 4 lines changed
  9. internal/ethapi/api.go — 4 lines changed
  10. internal/web3ext/web3ext.go — 1 line changed
  11. trie/trie_test.go — 2 lines changed

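At the heart of the change is the ethdb.KeyValueStater interface: Stat no longer takes a backend-specific property selector, and each backend returns a single, pre-formatted statistics report instead. Below is a minimal caller-side sketch mirroring the renamed showDBStats helper in cmd/geth/dbcmd.go; the package name dbstatsdemo is made up for the sketch, and db can be any value implementing ethdb.KeyValueStater (for example the handle returned by utils.MakeChainDatabase).

package dbstatsdemo

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// showDBStats mirrors the renamed helper in cmd/geth/dbcmd.go: a single
// Stat() call with no property selector; the backend formats its own report.
func showDBStats(db ethdb.KeyValueStater) {
	// Old form, removed by this commit:
	//   db.Stat("leveldb.stats") and db.Stat("leveldb.iostats")
	stats, err := db.Stat()
	if err != nil {
		log.Warn("Failed to read database stats", "error", err)
		return
	}
	fmt.Println(stats)
}
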
@@ -336,7 +336,7 @@ func importChain(ctx *cli.Context) error {
fmt.Printf("Import done in %v.\n\n", time.Since(start))
// Output pre-compaction stats mostly to see the import trashing
showLeveldbStats(db)
showDBStats(db)
// Print the memory statistics used by the importing
mem := new(runtime.MemStats)
@@ -359,7 +359,7 @@ func importChain(ctx *cli.Context) error {
}
fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
showLeveldbStats(db)
showDBStats(db)
return importErr
}

@@ -407,17 +407,13 @@ func checkStateContent(ctx *cli.Context) error {
return nil
}
func showLeveldbStats(db ethdb.KeyValueStater) {
if stats, err := db.Stat("leveldb.stats"); err != nil {
func showDBStats(db ethdb.KeyValueStater) {
stats, err := db.Stat()
if err != nil {
log.Warn("Failed to read database stats", "error", err)
} else {
fmt.Println(stats)
}
if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
log.Warn("Failed to read database iostats", "error", err)
} else {
fmt.Println(ioStats)
return
}
fmt.Println(stats)
}
func dbStats(ctx *cli.Context) error {
@@ -427,7 +423,7 @@ func dbStats(ctx *cli.Context) error {
db := utils.MakeChainDatabase(ctx, stack, true)
defer db.Close()
showLeveldbStats(db)
showDBStats(db)
return nil
}
@@ -439,7 +435,7 @@ func dbCompact(ctx *cli.Context) error {
defer db.Close()
log.Info("Stats before compaction")
showLeveldbStats(db)
showDBStats(db)
log.Info("Triggering compaction")
if err := db.Compact(nil, nil); err != nil {
@@ -447,7 +443,7 @@ func dbCompact(ctx *cli.Context) error {
return err
}
log.Info("Stats after compaction")
showLeveldbStats(db)
showDBStats(db)
return nil
}

@@ -147,9 +147,9 @@ func (t *table) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
}
}
// Stat returns a particular internal stat of the database.
func (t *table) Stat(property string) (string, error) {
return t.db.Stat(property)
// Stat returns the statistic data of the database.
func (t *table) Stat() (string, error) {
return t.db.Stat()
}
// Compact flattens the underlying data store for the given key range. In essence,

@@ -39,8 +39,8 @@ type KeyValueWriter interface {
// KeyValueStater wraps the Stat method of a backing data store.
type KeyValueStater interface {
// Stat returns a particular internal stat of the database.
Stat(property string) (string, error)
// Stat returns the statistic data of the database.
Stat() (string, error)
}
// Compacter wraps the Compact method of a backing data store.

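With the property selector gone, the trimmed-down interface is trivial to satisfy. A minimal sketch of a stub implementation (statDb is a made-up name used only for illustration; the spongeDb test stub in trie/trie_test.go further down does exactly the same):

package statdemo

import "github.com/ethereum/go-ethereum/ethdb"

// statDb is a hypothetical stub, named here only to show the new interface shape.
type statDb struct{}

// Stat returns the statistic data of the database. A stub may simply return
// an empty string, which is what the memory and remote backends now do.
func (statDb) Stat() (string, error) { return "", nil }

// Compile-time check that the stub satisfies ethdb.KeyValueStater.
var _ ethdb.KeyValueStater = statDb{}
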
@@ -22,7 +22,6 @@ package leveldb
import (
"fmt"
"strings"
"sync"
"time"
@@ -244,14 +243,53 @@ func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
return &snapshot{db: snap}, nil
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
if property == "" {
property = "leveldb.stats"
} else if !strings.HasPrefix(property, "leveldb.") {
property = "leveldb." + property
// Stat returns the statistic data of the database.
func (db *Database) Stat() (string, error) {
var stats leveldb.DBStats
if err := db.db.Stats(&stats); err != nil {
return "", err
}
return db.db.GetProperty(property)
var (
message string
totalRead int64
totalWrite int64
totalSize int64
totalTables int
totalDuration time.Duration
)
if len(stats.LevelSizes) > 0 {
message += " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
"-------+------------+---------------+---------------+---------------+---------------\n"
for level, size := range stats.LevelSizes {
read := stats.LevelRead[level]
write := stats.LevelWrite[level]
duration := stats.LevelDurations[level]
tables := stats.LevelTablesCounts[level]
if tables == 0 && duration == 0 {
continue
}
totalTables += tables
totalSize += size
totalRead += read
totalWrite += write
totalDuration += duration
message += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
level, tables, float64(size)/1048576.0, duration.Seconds(),
float64(read)/1048576.0, float64(write)/1048576.0)
}
message += "-------+------------+---------------+---------------+---------------+---------------\n"
message += fmt.Sprintf(" Total | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
totalTables, float64(totalSize)/1048576.0, totalDuration.Seconds(),
float64(totalRead)/1048576.0, float64(totalWrite)/1048576.0)
message += "-------+------------+---------------+---------------+---------------+---------------\n\n"
}
message += fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f\n", float64(stats.IORead)/1048576.0, float64(stats.IOWrite)/1048576.0)
message += fmt.Sprintf("BlockCache(MB):%.5f FileCache:%d\n", float64(stats.BlockCacheSize)/1048576.0, stats.OpenedTablesCount)
message += fmt.Sprintf("MemoryCompaction:%d Level0Compaction:%d NonLevel0Compaction:%d SeekCompaction:%d\n", stats.MemComp, stats.Level0Comp, stats.NonLevel0Comp, stats.SeekComp)
message += fmt.Sprintf("WriteDelayCount:%d WriteDelayDuration:%s Paused:%t\n", stats.WriteDelayCount, common.PrettyDuration(stats.WriteDelayDuration), stats.WritePaused)
message += fmt.Sprintf("Snapshots:%d Iterators:%d\n", stats.AliveSnapshots, stats.AliveIterators)
return message, nil
}
// Compact flattens the underlying data store for the given key range. In essence,

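The new leveldb implementation folds the old "leveldb.stats" and "leveldb.iostats" properties into one report built from goleveldb's DBStats, dividing byte counts by 1048576 to print the MB columns. A usage sketch against the ethdb/leveldb package; the path and sizing arguments are illustrative and not taken from the commit:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb/leveldb"
)

func main() {
	// Illustrative arguments: path, cache (MB), open file handles, metrics
	// namespace and read-only flag; none of these come from the commit.
	db, err := leveldb.New("chaindata", 128, 128, "demo/db/", false)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// One call now returns the per-level table plus the I/O, compaction,
	// write-delay and snapshot/iterator counters built above.
	stats, err := db.Stat()
	if err != nil {
		panic(err)
	}
	fmt.Println(stats)
}
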
@@ -182,9 +182,9 @@ func (db *Database) NewSnapshot() (ethdb.Snapshot, error) {
return newSnapshot(db), nil
}
// Stat returns a particular internal stat of the database.
func (db *Database) Stat(property string) (string, error) {
return "", errors.New("unknown property")
// Stat returns the statistic data of the database.
func (db *Database) Stat() (string, error) {
return "", nil
}
// Compact is not supported on a memory database, but there's no need either as

@@ -416,10 +416,8 @@ func upperBound(prefix []byte) (limit []byte) {
}
// Stat returns the internal metrics of Pebble in a text format. It's a developer
// method to read everything there is to read independent of Pebble version.
//
// The property is unused in Pebble as there's only one thing to retrieve.
func (d *Database) Stat(property string) (string, error) {
// method to read everything there is to read, independent of Pebble version.
func (d *Database) Stat() (string, error) {
return d.db.Metrics().String(), nil
}

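For Pebble nothing new has to be assembled: cockroachdb/pebble already exposes a Metrics() snapshot whose String() method renders the full per-level report. A standalone sketch against the pebble library itself; the directory name is illustrative:

package main

import (
	"fmt"

	"github.com/cockroachdb/pebble"
)

func main() {
	// Open (or create) a throwaway Pebble store with default options.
	db, err := pebble.Open("pebble-demo", &pebble.Options{})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Metrics() returns a point-in-time snapshot of Pebble's internal counters;
	// String() renders the per-level report that Stat() now returns verbatim.
	fmt.Println(db.Metrics().String())
}
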
@@ -126,8 +126,8 @@ func (db *Database) NewIterator(prefix []byte, start []byte) ethdb.Iterator {
panic("not supported")
}
func (db *Database) Stat(property string) (string, error) {
panic("not supported")
func (db *Database) Stat() (string, error) {
return "", nil
}
func (db *Database) AncientDatadir() (string, error) {

@@ -2108,8 +2108,8 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) {
}
// ChaindbProperty returns leveldb properties of the key-value database.
func (api *DebugAPI) ChaindbProperty(property string) (string, error) {
return api.b.ChainDb().Stat(property)
func (api *DebugAPI) ChaindbProperty() (string, error) {
return api.b.ChainDb().Stat()
}
// ChaindbCompact flattens the entire key-value database into a single level,

@@ -263,7 +263,6 @@ web3._extend({
new web3._extend.Method({
name: 'chaindbProperty',
call: 'debug_chaindbProperty',
params: 1,
outputFormatter: console.log
}),
new web3._extend.Method({

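On the RPC side, debug_chaindbProperty correspondingly drops its parameter: params: 1 is removed from the console wrapper, so the method is now invoked without arguments. A sketch of calling it from Go over HTTP, assuming a locally running node with the debug API enabled; the endpoint is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Endpoint is illustrative; the node must expose the debug namespace over HTTP.
	client, err := rpc.Dial("http://127.0.0.1:8545")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var stats string
	// No property argument any more: the server replies with the full Stat() dump.
	if err := client.Call(&stats, "debug_chaindbProperty"); err != nil {
		panic(err)
	}
	fmt.Println(stats)
}

From the geth JavaScript console the equivalent is simply debug.chaindbProperty().
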
@@ -820,7 +820,7 @@ func (s *spongeDb) Delete(key []byte) error { panic("implement me") }
func (s *spongeDb) NewBatch() ethdb.Batch { return &spongeBatch{s} }
func (s *spongeDb) NewBatchWithSize(size int) ethdb.Batch { return &spongeBatch{s} }
func (s *spongeDb) NewSnapshot() (ethdb.Snapshot, error) { panic("implement me") }
func (s *spongeDb) Stat(property string) (string, error) { panic("implement me") }
func (s *spongeDb) Stat() (string, error) { panic("implement me") }
func (s *spongeDb) Compact(start []byte, limit []byte) error { panic("implement me") }
func (s *spongeDb) Close() error { return nil }
func (s *spongeDb) Put(key []byte, value []byte) error {
