diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index 37d17fb1e7..cf4cdef76c 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -267,7 +267,7 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
 		cfg.Metrics.Enabled = ctx.Bool(utils.MetricsEnabledFlag.Name)
 	}
 	if ctx.IsSet(utils.MetricsEnabledExpensiveFlag.Name) {
-		cfg.Metrics.EnabledExpensive = ctx.Bool(utils.MetricsEnabledExpensiveFlag.Name)
+		log.Warn("Expensive metrics are collected by default, please remove this flag", "flag", utils.MetricsEnabledExpensiveFlag.Name)
 	}
 	if ctx.IsSet(utils.MetricsHTTPFlag.Name) {
 		cfg.Metrics.HTTP = ctx.String(utils.MetricsHTTPFlag.Name)
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index e002975d53..b38f33b8dd 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -862,12 +862,6 @@ var (
 		Usage:    "Enable metrics collection and reporting",
 		Category: flags.MetricsCategory,
 	}
-	MetricsEnabledExpensiveFlag = &cli.BoolFlag{
-		Name:     "metrics.expensive",
-		Usage:    "Enable expensive metrics collection and reporting",
-		Category: flags.MetricsCategory,
-	}
-
 	// MetricsHTTPFlag defines the endpoint for a stand-alone metrics HTTP endpoint.
 	// Since the pprof service enables sensitive/vulnerable behavior, this allows a user
 	// to enable a public-OK metrics endpoint without having to worry about ALSO exposing
diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go
index 49321053c6..1dfd1a5f89 100644
--- a/cmd/utils/flags_legacy.go
+++ b/cmd/utils/flags_legacy.go
@@ -93,35 +93,35 @@ var (
 		Name:     "light.serve",
 		Usage:    "Maximum percentage of time allowed for serving LES requests (deprecated)",
 		Value:    ethconfig.Defaults.LightServ,
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	LightIngressFlag = &cli.IntFlag{
 		Name:     "light.ingress",
 		Usage:    "Incoming bandwidth limit for serving light clients (deprecated)",
 		Value:    ethconfig.Defaults.LightIngress,
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	LightEgressFlag = &cli.IntFlag{
 		Name:     "light.egress",
 		Usage:    "Outgoing bandwidth limit for serving light clients (deprecated)",
 		Value:    ethconfig.Defaults.LightEgress,
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	LightMaxPeersFlag = &cli.IntFlag{
 		Name:     "light.maxpeers",
 		Usage:    "Maximum number of light clients to serve, or light servers to attach to (deprecated)",
 		Value:    ethconfig.Defaults.LightPeers,
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	LightNoPruneFlag = &cli.BoolFlag{
 		Name:     "light.nopruning",
 		Usage:    "Disable ancient light chain data pruning (deprecated)",
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	LightNoSyncServeFlag = &cli.BoolFlag{
 		Name:     "light.nosyncserve",
 		Usage:    "Enables serving light clients before syncing (deprecated)",
-		Category: flags.LightCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	// Deprecated November 2023
 	LogBacktraceAtFlag = &cli.StringFlag{
@@ -138,19 +138,24 @@ var (
 	// Deprecated February 2024
 	MinerNewPayloadTimeoutFlag = &cli.DurationFlag{
 		Name:     "miner.newpayload-timeout",
-		Usage:    "Specify the maximum time allowance for creating a new payload",
+		Usage:    "Specify the maximum time allowance for creating a new payload (deprecated)",
 		Value:    ethconfig.Defaults.Miner.Recommit,
-		Category: flags.MinerCategory,
+		Category: flags.DeprecatedCategory,
 	}
 	MinerEtherbaseFlag = &cli.StringFlag{
 		Name:     "miner.etherbase",
-		Usage:    "0x prefixed public address for block mining rewards",
-		Category: flags.MinerCategory,
+		Usage:    "0x prefixed public address for block mining rewards (deprecated)",
+		Category: flags.DeprecatedCategory,
 	}
 	MiningEnabledFlag = &cli.BoolFlag{
 		Name:     "mine",
-		Usage:    "Enable mining",
-		Category: flags.MinerCategory,
+		Usage:    "Enable mining (deprecated)",
+		Category: flags.DeprecatedCategory,
+	}
+	MetricsEnabledExpensiveFlag = &cli.BoolFlag{
+		Name:     "metrics.expensive",
+		Usage:    "Enable expensive metrics collection and reporting (deprecated)",
+		Category: flags.DeprecatedCategory,
 	}
 )
 
diff --git a/core/blockchain.go b/core/blockchain.go
index 67b49cfe02..9bd7fdcd95 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -61,26 +61,26 @@ var (
 
 	chainInfoGauge = metrics.NewRegisteredGaugeInfo("chain/info", nil)
 
-	accountReadTimer   = metrics.NewRegisteredTimer("chain/account/reads", nil)
-	accountHashTimer   = metrics.NewRegisteredTimer("chain/account/hashes", nil)
-	accountUpdateTimer = metrics.NewRegisteredTimer("chain/account/updates", nil)
-	accountCommitTimer = metrics.NewRegisteredTimer("chain/account/commits", nil)
-
-	storageReadTimer   = metrics.NewRegisteredTimer("chain/storage/reads", nil)
-	storageHashTimer   = metrics.NewRegisteredTimer("chain/storage/hashes", nil)
-	storageUpdateTimer = metrics.NewRegisteredTimer("chain/storage/updates", nil)
-	storageCommitTimer = metrics.NewRegisteredTimer("chain/storage/commits", nil)
-
-	snapshotAccountReadTimer = metrics.NewRegisteredTimer("chain/snapshot/account/reads", nil)
-	snapshotStorageReadTimer = metrics.NewRegisteredTimer("chain/snapshot/storage/reads", nil)
-	snapshotCommitTimer      = metrics.NewRegisteredTimer("chain/snapshot/commits", nil)
-
-	triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)
-
-	blockInsertTimer     = metrics.NewRegisteredTimer("chain/inserts", nil)
-	blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
-	blockExecutionTimer  = metrics.NewRegisteredTimer("chain/execution", nil)
-	blockWriteTimer      = metrics.NewRegisteredTimer("chain/write", nil)
+	accountReadTimer   = metrics.NewRegisteredResettingTimer("chain/account/reads", nil)
+	accountHashTimer   = metrics.NewRegisteredResettingTimer("chain/account/hashes", nil)
+	accountUpdateTimer = metrics.NewRegisteredResettingTimer("chain/account/updates", nil)
+	accountCommitTimer = metrics.NewRegisteredResettingTimer("chain/account/commits", nil)
+
+	storageReadTimer   = metrics.NewRegisteredResettingTimer("chain/storage/reads", nil)
+	storageHashTimer   = metrics.NewRegisteredResettingTimer("chain/storage/hashes", nil)
+	storageUpdateTimer = metrics.NewRegisteredResettingTimer("chain/storage/updates", nil)
+	storageCommitTimer = metrics.NewRegisteredResettingTimer("chain/storage/commits", nil)
+
+	snapshotAccountReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/account/reads", nil)
+	snapshotStorageReadTimer = metrics.NewRegisteredResettingTimer("chain/snapshot/storage/reads", nil)
+	snapshotCommitTimer      = metrics.NewRegisteredResettingTimer("chain/snapshot/commits", nil)
+
+	triedbCommitTimer = metrics.NewRegisteredResettingTimer("chain/triedb/commits", nil)
+
+	blockInsertTimer     = metrics.NewRegisteredResettingTimer("chain/inserts", nil)
+	blockValidationTimer = metrics.NewRegisteredResettingTimer("chain/validation", nil)
+	blockExecutionTimer  = metrics.NewRegisteredResettingTimer("chain/execution", nil)
+	blockWriteTimer      = metrics.NewRegisteredResettingTimer("chain/write", nil)
 
 	blockReorgMeter     = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
 	blockReorgAddMeter  = metrics.NewRegisteredMeter("chain/reorg/add", nil)
diff --git a/core/state/state_object.go b/core/state/state_object.go
index fc26af68db..6dea68465b 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -25,7 +25,6 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie/trienode"
 	"github.com/holiman/uint256"
@@ -197,9 +196,8 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
 	if s.db.snap != nil {
 		start := time.Now()
 		enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
-		if metrics.EnabledExpensive {
-			s.db.SnapshotStorageReads += time.Since(start)
-		}
+		s.db.SnapshotStorageReads += time.Since(start)
+
 		if len(enc) > 0 {
 			_, content, _, err := rlp.Split(enc)
 			if err != nil {
@@ -217,9 +215,8 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash {
 			return common.Hash{}
 		}
 		val, err := tr.GetStorage(s.address, key.Bytes())
-		if metrics.EnabledExpensive {
-			s.db.StorageReads += time.Since(start)
-		}
+		s.db.StorageReads += time.Since(start)
+
 		if err != nil {
 			s.db.setError(err)
 			return common.Hash{}
@@ -283,9 +280,8 @@ func (s *stateObject) updateTrie() (Trie, error) {
 		return s.trie, nil
 	}
 	// Track the amount of time wasted on updating the storage trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
+
 	// The snapshot storage map for the object
 	var (
 		storage map[common.Hash][]byte
@@ -370,9 +366,8 @@ func (s *stateObject) updateRoot() {
 		return
 	}
 	// Track the amount of time wasted on hashing the storage trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
+
 	s.data.Root = tr.Hash()
 }
 
@@ -386,9 +381,8 @@ func (s *stateObject) commit() (*trienode.NodeSet, error) {
 		return nil, nil
 	}
 	// Track the amount of time wasted on committing the storage trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now())
+
 	// The trie is currently in an open state and could potentially contain
 	// cached mutations. Call commit to acquire a set of nodes that have been
 	// modified, the set can be nil if nothing to commit.
diff --git a/core/state/statedb.go b/core/state/statedb.go
index 4d1163d3c6..f90b30f399 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -28,7 +28,6 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
-	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
@@ -495,9 +494,8 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common
 // updateStateObject writes the given object to the trie.
 func (s *StateDB) updateStateObject(obj *stateObject) {
 	// Track the amount of time wasted on updating the account from the trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+
 	// Encode the account and update the account trie
 	addr := obj.Address()
 	if err := s.trie.UpdateAccount(addr, &obj.data); err != nil {
@@ -527,9 +525,8 @@ func (s *StateDB) updateStateObject(obj *stateObject) {
 // deleteStateObject removes the given object from the state trie.
 func (s *StateDB) deleteStateObject(obj *stateObject) {
 	// Track the amount of time wasted on deleting the account from the trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.AccountUpdates += time.Since(start) }(time.Now())
+
 	// Delete the account from the trie
 	addr := obj.Address()
 	if err := s.trie.DeleteAccount(addr); err != nil {
@@ -561,9 +558,8 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 	if s.snap != nil {
 		start := time.Now()
 		acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
-		if metrics.EnabledExpensive {
-			s.SnapshotAccountReads += time.Since(start)
-		}
+		s.SnapshotAccountReads += time.Since(start)
+
 		if err == nil {
 			if acc == nil {
 				return nil
@@ -587,9 +583,8 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject {
 		start := time.Now()
 		var err error
 		data, err = s.trie.GetAccount(addr)
-		if metrics.EnabledExpensive {
-			s.AccountReads += time.Since(start)
-		}
+		s.AccountReads += time.Since(start)
+
 		if err != nil {
 			s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %w", addr.Bytes(), err))
 			return nil
@@ -917,9 +912,8 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
 		s.stateObjectsPending = make(map[common.Address]struct{})
 	}
 	// Track the amount of time wasted on hashing the account trie
-	if metrics.EnabledExpensive {
-		defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
-	}
+	defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now())
+
 	return s.trie.Hash()
 }
 
@@ -1042,16 +1036,16 @@ func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root
 	if err != nil {
 		return nil, nil, err
 	}
-	if metrics.EnabledExpensive {
-		n := int64(len(slots))
+	// Report the metrics
+	n := int64(len(slots))
 
-		slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
-		slotDeletionMaxSize.UpdateIfGt(int64(size))
+	slotDeletionMaxCount.UpdateIfGt(int64(len(slots)))
+	slotDeletionMaxSize.UpdateIfGt(int64(size))
+
+	slotDeletionTimer.UpdateSince(start)
+	slotDeletionCount.Mark(n)
+	slotDeletionSize.Mark(int64(size))
 
-		slotDeletionTimer.UpdateSince(start)
-		slotDeletionCount.Mark(n)
-		slotDeletionSize.Mark(int64(size))
-	}
 	return slots, nodes, nil
 }
 
@@ -1190,10 +1184,8 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 		}
 	}
 	// Write the account trie changes, measuring the amount of wasted time
-	var start time.Time
-	if metrics.EnabledExpensive {
-		start = time.Now()
-	}
+	start := time.Now()
+
 	root, set, err := s.trie.Commit(true)
 	if err != nil {
 		return common.Hash{}, err
@@ -1205,23 +1197,23 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 		}
 		accountTrieNodesUpdated, accountTrieNodesDeleted = set.Size()
 	}
-	if metrics.EnabledExpensive {
-		s.AccountCommits += time.Since(start)
+	// Report the commit metrics
+	s.AccountCommits += time.Since(start)
+
+	accountUpdatedMeter.Mark(int64(s.AccountUpdated))
+	storageUpdatedMeter.Mark(int64(s.StorageUpdated))
+	accountDeletedMeter.Mark(int64(s.AccountDeleted))
+	storageDeletedMeter.Mark(int64(s.StorageDeleted))
+	accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
+	accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
+	storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
+	storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
+	s.AccountUpdated, s.AccountDeleted = 0, 0
+	s.StorageUpdated, s.StorageDeleted = 0, 0
 
-		accountUpdatedMeter.Mark(int64(s.AccountUpdated))
-		storageUpdatedMeter.Mark(int64(s.StorageUpdated))
-		accountDeletedMeter.Mark(int64(s.AccountDeleted))
-		storageDeletedMeter.Mark(int64(s.StorageDeleted))
-		accountTrieUpdatedMeter.Mark(int64(accountTrieNodesUpdated))
-		accountTrieDeletedMeter.Mark(int64(accountTrieNodesDeleted))
-		storageTriesUpdatedMeter.Mark(int64(storageTrieNodesUpdated))
-		storageTriesDeletedMeter.Mark(int64(storageTrieNodesDeleted))
-		s.AccountUpdated, s.AccountDeleted = 0, 0
-		s.StorageUpdated, s.StorageDeleted = 0, 0
-	}
 	// If snapshotting is enabled, update the snapshot tree with this new version
 	if s.snap != nil {
-		start := time.Now()
+		start = time.Now()
 		// Only update if there's a state transition (skip empty Clique blocks)
 		if parent := s.snap.Root(); parent != root {
 			if err := s.snaps.Update(root, parent, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil {
@@ -1235,9 +1227,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 				log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
 			}
 		}
-		if metrics.EnabledExpensive {
-			s.SnapshotCommits += time.Since(start)
-		}
+		s.SnapshotCommits += time.Since(start)
 		s.snap = nil
 	}
 	if root == (common.Hash{}) {
@@ -1248,15 +1238,14 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
 		origin = types.EmptyRootHash
 	}
 	if root != origin {
-		start := time.Now()
+		start = time.Now()
 		set := triestate.New(s.accountsOrigin, s.storagesOrigin)
 		if err := s.db.TrieDB().Update(root, origin, block, nodes, set); err != nil {
 			return common.Hash{}, err
 		}
 		s.originalRoot = root
-		if metrics.EnabledExpensive {
-			s.TrieDBCommits += time.Since(start)
-		}
+		s.TrieDBCommits += time.Since(start)
+
 		if s.onCommit != nil {
 			s.onCommit(set)
 		}
diff --git a/internal/flags/categories.go b/internal/flags/categories.go
index c044e28f38..d426add55b 100644
--- a/internal/flags/categories.go
+++ b/internal/flags/categories.go
@@ -21,7 +21,6 @@ import "github.com/urfave/cli/v2"
 const (
 	EthCategory    = "ETHEREUM"
 	BeaconCategory = "BEACON CHAIN"
-	LightCategory  = "LIGHT CLIENT"
 	DevCategory    = "DEVELOPER CHAIN"
 	StateCategory  = "STATE HISTORY MANAGEMENT"
 	TxPoolCategory = "TRANSACTION POOL (EVM)"
diff --git a/metrics/config.go b/metrics/config.go
index 2eb09fb48a..72f94dd194 100644
--- a/metrics/config.go
+++ b/metrics/config.go
@@ -19,7 +19,7 @@ package metrics
 // Config contains the configuration for the metric collection.
 type Config struct {
 	Enabled          bool   `toml:",omitempty"`
-	EnabledExpensive bool   `toml:",omitempty"`
+	EnabledExpensive bool   `toml:"-"`
 	HTTP             string `toml:",omitempty"`
 	Port             int    `toml:",omitempty"`
 	EnableInfluxDB   bool   `toml:",omitempty"`
diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go
index bbc4fc024b..5c8501fd9d 100644
--- a/metrics/influxdb/influxdb.go
+++ b/metrics/influxdb/influxdb.go
@@ -98,20 +98,23 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
 		}
 		return measurement, fields
 	case metrics.ResettingTimer:
-		t := metric.Snapshot()
-		if t.Count() == 0 {
+		ms := metric.Snapshot()
+		if ms.Count() == 0 {
 			break
 		}
-		ps := t.Percentiles([]float64{0.50, 0.95, 0.99})
-		measurement := fmt.Sprintf("%s%s.span", namespace, name)
+		ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
+		measurement := fmt.Sprintf("%s%s.timer", namespace, name)
 		fields := map[string]interface{}{
-			"count": t.Count(),
-			"max":   t.Max(),
-			"mean":  t.Mean(),
-			"min":   t.Min(),
-			"p50":   int(ps[0]),
-			"p95":   int(ps[1]),
-			"p99":   int(ps[2]),
+			"count": ms.Count(),
+			"max":   ms.Max(),
+			"mean":  ms.Mean(),
+			"min":   ms.Min(),
+			"p50":   ps[0],
+			"p75":   ps[1],
+			"p95":   ps[2],
+			"p99":   ps[3],
+			"p999":  ps[4],
+			"p9999": ps[5],
 		}
 		return measurement, fields
 	}
diff --git a/metrics/influxdb/testdata/influxdbv1.want b/metrics/influxdb/testdata/influxdbv1.want
index 9443faedc5..ded9434c73 100644
--- a/metrics/influxdb/testdata/influxdbv1.want
+++ b/metrics/influxdb/testdata/influxdbv1.want
@@ -7,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000
 goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
 goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
 goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
-goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/resetting_timer.timer count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000,p75=40500000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000 978307200000000000
 goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git a/metrics/influxdb/testdata/influxdbv2.want b/metrics/influxdb/testdata/influxdbv2.want
index 9443faedc5..ded9434c73 100644
--- a/metrics/influxdb/testdata/influxdbv2.want
+++ b/metrics/influxdb/testdata/influxdbv2.want
@@ -7,5 +7,5 @@ goth.test/gauge_float64.gauge value=34567.89 978307200000000000
 goth.test/gauge_info.gauge value="{\"arch\":\"amd64\",\"commit\":\"7caa2d8163ae3132c1c2d6978c76610caee2d949\",\"os\":\"linux\",\"protocol_versions\":\"64 65 66\",\"version\":\"1.10.18-unstable\"}" 978307200000000000
 goth.test/histogram.histogram count=3i,max=3i,mean=2,min=1i,p25=1,p50=2,p75=3,p95=3,p99=3,p999=3,p9999=3,stddev=0.816496580927726,variance=0.6666666666666666 978307200000000000
 goth.test/meter.meter count=0i,m1=0,m15=0,m5=0,mean=0 978307200000000000
-goth.test/resetting_timer.span count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000i,p95=120000000i,p99=120000000i 978307200000000000
+goth.test/resetting_timer.timer count=6i,max=120000000i,mean=30000000,min=10000000i,p50=12500000,p75=40500000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000 978307200000000000
 goth.test/timer.timer count=6i,m1=0,m15=0,m5=0,max=120000000i,mean=38333333.333333336,meanrate=0,min=20000000i,p50=22500000,p75=48000000,p95=120000000,p99=120000000,p999=120000000,p9999=120000000,stddev=36545253.529775314,variance=1335555555555555.2 978307200000000000
diff --git a/metrics/metrics.go b/metrics/metrics.go
index 9ca8f115c0..9e0ac23dd5 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -24,23 +24,12 @@ import (
 // for less cluttered pprof profiles.
 var Enabled = false
 
-// EnabledExpensive is a soft-flag meant for external packages to check if costly
-// metrics gathering is allowed or not. The goal is to separate standard metrics
-// for health monitoring and debug metrics that might impact runtime performance.
-var EnabledExpensive = false
-
 // enablerFlags is the CLI flag names to use to enable metrics collections.
 var enablerFlags = []string{"metrics"}
 
 // enablerEnvVars is the env var names to use to enable metrics collections.
 var enablerEnvVars = []string{"GETH_METRICS"}
 
-// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections.
-var expensiveEnablerFlags = []string{"metrics.expensive"}
-
-// expensiveEnablerEnvVars is the env var names to use to enable metrics collections.
-var expensiveEnablerEnvVars = []string{"GETH_METRICS_EXPENSIVE"}
-
 // Init enables or disables the metrics system. Since we need this to run before
 // any other code gets to create meters and timers, we'll actually do an ugly hack
 // and peek into the command line args for the metrics flag.
@@ -53,14 +42,6 @@ func init() {
 			}
 		}
 	}
-	for _, enabler := range expensiveEnablerEnvVars {
-		if val, found := syscall.Getenv(enabler); found && !EnabledExpensive {
-			if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
-				log.Info("Enabling expensive metrics collection")
-				EnabledExpensive = true
-			}
-		}
-	}
 	for _, arg := range os.Args {
 		flag := strings.TrimLeft(arg, "-")
 
@@ -70,12 +51,6 @@ func init() {
 				Enabled = true
 			}
 		}
-		for _, enabler := range expensiveEnablerFlags {
-			if !EnabledExpensive && flag == enabler {
-				log.Info("Enabling expensive metrics collection")
-				EnabledExpensive = true
-			}
-		}
 	}
 }
 
diff --git a/metrics/prometheus/collector.go b/metrics/prometheus/collector.go
index 25b258d56a..353336763b 100644
--- a/metrics/prometheus/collector.go
+++ b/metrics/prometheus/collector.go
@@ -125,12 +125,13 @@ func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnaps
 	if m.Count() <= 0 {
 		return
 	}
-	ps := m.Percentiles([]float64{0.50, 0.95, 0.99})
+	pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
+	ps := m.Percentiles(pv)
 	c.writeSummaryCounter(name, m.Count())
 	c.buff.WriteString(fmt.Sprintf(typeSummaryTpl, mutateKey(name)))
-	c.writeSummaryPercentile(name, "0.50", ps[0])
-	c.writeSummaryPercentile(name, "0.95", ps[1])
-	c.writeSummaryPercentile(name, "0.99", ps[2])
+	for i := range pv {
+		c.writeSummaryPercentile(name, strconv.FormatFloat(pv[i], 'f', -1, 64), ps[i])
+	}
 	c.buff.WriteRune('\n')
 }
 
diff --git a/metrics/prometheus/testdata/prometheus.want b/metrics/prometheus/testdata/prometheus.want
index 861c5f5cf0..a999d83801 100644
--- a/metrics/prometheus/testdata/prometheus.want
+++ b/metrics/prometheus/testdata/prometheus.want
@@ -53,9 +53,12 @@ test_meter 0
 test_resetting_timer_count 6
 
 # TYPE test_resetting_timer summary
-test_resetting_timer {quantile="0.50"} 1.25e+07
+test_resetting_timer {quantile="0.5"} 1.25e+07
+test_resetting_timer {quantile="0.75"} 4.05e+07
 test_resetting_timer {quantile="0.95"} 1.2e+08
 test_resetting_timer {quantile="0.99"} 1.2e+08
+test_resetting_timer {quantile="0.999"} 1.2e+08
+test_resetting_timer {quantile="0.9999"} 1.2e+08
 
 # TYPE test_timer_count counter
 test_timer_count 6
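
A minimal sketch (not part of the diff) of the call-site pattern this change leaves behind. The package name, the timer name "example/duration" and doWork are invented for illustration, and it assumes the stock go-ethereum metrics constructors, which hand back no-op implementations while metrics collection is disabled:

package example

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// A hypothetical resetting timer, registered the same way as the chain/* timers above.
var exampleTimer = metrics.NewRegisteredResettingTimer("example/duration", nil)

func doWork() {
	// Before this change the update below would typically sit inside
	// `if metrics.EnabledExpensive { ... }`; now it runs unconditionally.
	// With metrics disabled the registered timer is a no-op, so the
	// unconditional update stays cheap.
	defer func(start time.Time) { exampleTimer.Update(time.Since(start)) }(time.Now())

	time.Sleep(10 * time.Millisecond) // stand-in for the measured work
}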