@@ -111,6 +111,14 @@ var (
 	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
 	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
 	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+	// throttleTxMeter counts how many transactions are rejected due to too-many-changes between
+	// txpool reorgs.
+	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+	// reorgDurationTimer measures how long a txpool reorg takes.
+	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+	// that this number is pretty low, since txpool reorgs happen very frequently.
+	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
 
 	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
 	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
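
The new collectors reuse geth's metrics package, which this file already uses for its other meters and gauges: a meter counts throttled replacements, a timer records reorg latency, and a histogram backed by an exponentially-decaying sample tracks how many drops happen per reorg window. A minimal, self-contained sketch of how these collector types are registered and fed; the demo/* names and values are hypothetical, only the metrics API calls mirror the diff:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// Hypothetical collectors mirroring the three kinds added in the diff.
var (
	demoThrottleMeter = metrics.NewRegisteredMeter("demo/throttle", nil)
	demoReorgTimer    = metrics.NewRegisteredTimer("demo/reorgtime", nil)
	demoDropHistogram = metrics.NewRegisteredHistogram("demo/drops", nil, metrics.NewExpDecaySample(1028, 0.015))
)

func main() {
	demoThrottleMeter.Mark(1)   // one rejected replacement
	demoDropHistogram.Update(3) // sample: 3 drops in this reorg window

	start := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for a reorg pass
	demoReorgTimer.Update(time.Since(start))
}
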
@@ -256,6 +264,8 @@ type TxPool struct {
 	reorgDoneCh     chan chan struct{}
 	reorgShutdownCh chan struct{}  // requests shutdown of scheduleReorgLoop
 	wg              sync.WaitGroup // tracks loop, scheduleReorgLoop
+
+	changesSinceReorg int // A counter for how many drops we've performed between reorgs.
 }
 
 type txpoolResetRequest struct {
@@ -663,6 +673,15 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
 			underpricedTxMeter.Mark(1)
 			return false, ErrUnderpriced
 		}
+
+		// We're about to replace a transaction. The reorg does a more thorough
+		// analysis of what to remove and how, but it runs async. We don't want to
+		// do too many replacements between reorg-runs, so we cap the number of
+		// replacements to 25% of the slots.
+		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
+			throttleTxMeter.Mark(1)
+			return false, ErrTxPoolOverflow
+		}
 		// New transaction is better than our worse ones, make room for it.
 		// If it's a local transaction, forcibly discard all available transactions.
 		// Otherwise if we can't make enough room for new one, abort the operation.
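
In other words, once more than GlobalSlots/4 replacement-induced drops have accumulated since the last reorg pass, further replacements are refused with ErrTxPoolOverflow (and counted on txpool/throttle) until runReorg clears the counter. A standalone sketch of that bookkeeping; the type and method names are hypothetical and only illustrate the 25% rule, they are not geth API:

package main

import "fmt"

// replacementThrottle is a hypothetical, self-contained version of the
// counter the diff adds to TxPool: drops accumulate between reorg runs and
// new replacements are refused once they exceed 25% of the global slots.
type replacementThrottle struct {
	globalSlots       uint64 // pool.config.GlobalSlots in the real pool
	changesSinceReorg int    // drops performed since the last reorg run
}

// allow reports whether another replacement may be performed right now.
func (t *replacementThrottle) allow() bool {
	return t.changesSinceReorg <= int(t.globalSlots/4)
}

// record adds the number of transactions dropped by one replacement.
func (t *replacementThrottle) record(dropped int) {
	t.changesSinceReorg += dropped
}

// reset is what the reorg pass effectively does once it has run.
func (t *replacementThrottle) reset() {
	t.changesSinceReorg = 0
}

func main() {
	t := &replacementThrottle{globalSlots: 16} // throttles once more than 16/4 = 4 drops accumulate
	for i := 0; i < 6; i++ {
		if !t.allow() {
			fmt.Println("replacement", i, "throttled until next reorg")
			continue
		}
		t.record(1) // pretend each replacement drops one transaction
		fmt.Println("replacement", i, "accepted")
	}
	t.reset() // the next reorg run clears the counter
}
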
@@ -674,6 +693,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
 			overflowedTxMeter.Mark(1)
 			return false, ErrTxPoolOverflow
 		}
+		// Bump the counter of rejections-since-reorg
+		pool.changesSinceReorg += len(drop)
 		// Kick out the underpriced remote transactions.
 		for _, tx := range drop {
 			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
@@ -1114,6 +1135,9 @@ func (pool *TxPool) scheduleReorgLoop() {
 
 // runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop.
 func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
+	defer func(t0 time.Time) {
+		reorgDurationTimer.Update(time.Since(t0))
+	}(time.Now())
 	defer close(done)
 
 	var promoteAddrs []common.Address
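
The timing wraps all of runReorg: time.Now() is evaluated when the defer statement is reached (at function entry), while the deferred closure runs on every return path, so the timer always sees the full duration. A minimal sketch of that defer pattern with a hypothetical timer name, using the same metrics package as above:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// demoDurationTimer is a hypothetical stand-in for reorgDurationTimer.
var demoDurationTimer = metrics.NewRegisteredTimer("demo/worktime", nil)

func doWork() {
	// t0 is captured when the defer statement executes (function entry);
	// the closure body runs just before doWork returns.
	defer func(t0 time.Time) {
		demoDurationTimer.Update(time.Since(t0))
	}(time.Now())

	time.Sleep(25 * time.Millisecond) // stand-in for the actual reorg work
}

func main() {
	doWork()
}
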
@@ -1163,6 +1187,8 @@ func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*txSortedMap) {
 		highestPending := list.LastElement()
 		pool.pendingNonces.set(addr, highestPending.Nonce()+1)
 	}
+	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
+	pool.changesSinceReorg = 0 // Reset change counter
 	pool.mu.Unlock()
 
 	// Notify subsystems for newly added transactions
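
Sampling and resetting happen while pool.mu is still held, so a concurrent add cannot bump the counter between the histogram update and the reset, and each histogram sample covers exactly one reorg window. A reduced sketch of that read-sample-reset step under a lock; the names are hypothetical and a plain slice stands in for the histogram:

package main

import (
	"fmt"
	"sync"
)

// reorgStats is a hypothetical reduction of the pattern at the end of
// runReorg: read the per-window counter, feed it to a sink, and reset it,
// all under the same lock that the add path takes when bumping the counter.
type reorgStats struct {
	mu                sync.Mutex
	changesSinceReorg int
	samples           []int64 // stand-in for dropBetweenReorgHistogram
}

func (s *reorgStats) recordDrop(n int) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.changesSinceReorg += n
}

func (s *reorgStats) finishReorg() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.samples = append(s.samples, int64(s.changesSinceReorg)) // histogram.Update(...)
	s.changesSinceReorg = 0                                   // reset change counter
}

func main() {
	s := &reorgStats{}
	s.recordDrop(2)
	s.recordDrop(1)
	s.finishReorg()
	fmt.Println(s.samples) // [3]
}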