core/txpool: fix typos (#28213)

fix(core/txpool): fix typos
pull/28276/head
0xbstn 1 year ago committed by GitHub
parent 614804b33c
commit a081130081
  1. core/txpool/blobpool/evictheap.go (2 changes)
  2. core/txpool/blobpool/limbo.go (4 changes)
  3. core/txpool/blobpool/priority.go (2 changes)
  4. core/txpool/errors.go (2 changes)
  5. core/txpool/legacypool/legacypool.go (2 changes)
  6. core/txpool/txpool.go (4 changes)
  7. core/txpool/validation.go (2 changes)

core/txpool/blobpool/evictheap.go

@@ -44,7 +44,7 @@ type evictHeap struct {
 	index map[common.Address]int // Indices into the heap for replacements
 }
 
-// newPriceHeap creates a new heap of cheapets accounts in the blob pool to evict
+// newPriceHeap creates a new heap of cheapest accounts in the blob pool to evict
 // from in case of over saturation.
 func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
 	heap := &evictHeap{
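For context on what this heap is for: when the blob pool gets over-saturated, the account whose transactions pay the least should be the first eviction candidate. Below is a minimal, self-contained sketch of that "cheapest account first" idea using container/heap; the account and fee fields are illustrative stand-ins, not the pool's real evictHeap.

package main

import (
	"container/heap"
	"fmt"
)

type account struct {
	name   string
	feeCap uint64 // lowest fee cap among the account's pooled transactions
}

// evictQueue is a min-heap of accounts ordered by their cheapest transaction.
type evictQueue []account

func (q evictQueue) Len() int           { return len(q) }
func (q evictQueue) Less(i, j int) bool { return q[i].feeCap < q[j].feeCap }
func (q evictQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
func (q *evictQueue) Push(x any)        { *q = append(*q, x.(account)) }
func (q *evictQueue) Pop() any {
	old := *q
	n := len(old)
	item := old[n-1]
	*q = old[:n-1]
	return item
}

func main() {
	q := &evictQueue{{"alice", 30}, {"bob", 10}, {"carol", 20}}
	heap.Init(q)
	// The cheapest-paying account surfaces first when the pool is over-saturated.
	fmt.Println(heap.Pop(q).(account).name) // bob
}

The real evictHeap additionally keeps the index map shown in the hunk above so entries can be replaced in place when fees move; the sketch omits that bookkeeping.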

core/txpool/blobpool/limbo.go

@@ -143,7 +143,7 @@ func (l *limbo) push(tx *types.Transaction, block uint64) error {
 		return errors.New("already tracked blob transaction")
 	}
 	if err := l.setAndIndex(tx, block); err != nil {
-		log.Error("Failed to set and index liboed blobs", "tx", tx, "err", err)
+		log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
 		return err
 	}
 	return nil
@@ -191,7 +191,7 @@ func (l *limbo) update(txhash common.Hash, block uint64) {
 		log.Trace("Blob transaction unchanged in limbo", "tx", txhash, "block", block)
 		return
 	}
-	// Retrieve the old blobs from the data store and write tehm back with a new
+	// Retrieve the old blobs from the data store and write them back with a new
 	// block number. IF anything fails, there's not much to do, go on.
 	item, err := l.getAndDrop(id)
 	if err != nil {
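The two hunks above touch the limbo, which holds on to blob transactions that were recently included in a block so they can be resurrected after a reorg. A minimal in-memory sketch of that push/update flow follows, assuming a plain map instead of the pool's persistent store; the names are illustrative only.

package main

import (
	"errors"
	"fmt"
)

type limboEntry struct {
	block uint64 // block number the transaction was last seen included in
}

type limbo struct {
	index map[string]limboEntry // tx hash -> tracked entry
}

// push starts tracking an included blob transaction; tracking twice is an error.
func (l *limbo) push(hash string, block uint64) error {
	if _, ok := l.index[hash]; ok {
		return errors.New("already tracked blob transaction")
	}
	l.index[hash] = limboEntry{block: block}
	return nil
}

// update moves a tracked transaction to a new inclusion block, doing nothing
// if the transaction is unknown or the block is unchanged.
func (l *limbo) update(hash string, block uint64) {
	entry, ok := l.index[hash]
	if !ok || entry.block == block {
		return
	}
	l.index[hash] = limboEntry{block: block}
}

func main() {
	l := &limbo{index: make(map[string]limboEntry)}
	_ = l.push("0xabc", 100)
	l.update("0xabc", 101)
	fmt.Println(l.index["0xabc"].block) // 101
}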

core/txpool/blobpool/priority.go

@@ -27,7 +27,7 @@ import (
 var log2_1_125 = math.Log2(1.125)
 
 // evictionPriority calculates the eviction priority based on the algorithm
-// described in the BlobPool docs for a both fee components.
+// described in the BlobPool docs for both fee components.
 //
 // This method takes about 8ns on a very recent laptop CPU, recalculating about
 // 125 million transaction priority values per second.
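The log2_1_125 constant above hints at how the priority is measured: fee distances are expressed in 12.5% basefee jumps, i.e. logarithms base 1.125, and the calculation covers both the execution and the blob fee component. A hedged sketch of that fee-jump arithmetic follows; it illustrates the idea, not the pool's exact formula.

package main

import (
	"fmt"
	"math"
)

var log2_1_125 = math.Log2(1.125)

// feeJumps returns how many 12.5% jumps it takes to get from a fee of 1 to fee.
func feeJumps(fee float64) float64 {
	return math.Log2(fee) / log2_1_125
}

func main() {
	basefee, txFeeCap := 10.0, 40.0
	// A positive result is the number of fee jumps the transaction can absorb
	// before becoming unexecutable; a larger cushion means a lower eviction priority.
	fmt.Printf("%.1f jumps of headroom\n", feeJumps(txFeeCap)-feeJumps(basefee))
}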

core/txpool/errors.go

@@ -52,6 +52,6 @@ var (
 	ErrOversizedData = errors.New("oversized data")
 
 	// ErrFutureReplacePending is returned if a future transaction replaces a pending
-	// transaction. Future transactions should only be able to replace other future transactions.
+	// one. Future transactions should only be able to replace other future transactions.
 	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
 )
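To make the documented rule concrete, here is a tiny hedged sketch: a replacement is rejected when a not-yet-executable ("future") transaction would take over an executable ("pending") slot. The helper and its string statuses are invented for illustration and are not the pool's API.

package main

import (
	"errors"
	"fmt"
)

var errFutureReplacePending = errors.New("future transaction tries to replace pending")

// canReplace rejects a replacement where a pending slot would be taken over
// by a future transaction; future-for-future replacement is allowed.
func canReplace(existingStatus, incomingStatus string) error {
	if existingStatus == "pending" && incomingStatus == "future" {
		return errFutureReplacePending
	}
	return nil
}

func main() {
	fmt.Println(canReplace("pending", "future")) // rejected
	fmt.Println(canReplace("future", "future"))  // <nil>: future may replace future
}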

core/txpool/legacypool/legacypool.go

@@ -901,7 +901,7 @@ func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *typ
 }
 
 // addLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as a local ones, ensuring they go around the local pricing constraints.
+// senders as local ones, ensuring they go around the local pricing constraints.
 //
 // This method is used to add transactions from the RPC API and performs synchronous pool
 // reorganization and event propagation.
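A hedged sketch of the "local sender" behaviour this comment describes: senders submitted through the node's own RPC are remembered as local, and their transactions bypass the pool's minimum price filter. The pool shape below is invented for the example and is not LegacyPool.

package main

import "fmt"

type miniPool struct {
	minTip uint64
	locals map[string]bool // senders exempt from the pricing constraints
}

// addTx accepts a transaction unless it is an underpriced remote one.
func (p *miniPool) addTx(sender string, tip uint64, local bool) bool {
	if local {
		p.locals[sender] = true
	}
	if !p.locals[sender] && tip < p.minTip {
		return false // underpriced remote transaction, rejected
	}
	return true
}

func main() {
	p := &miniPool{minTip: 2, locals: make(map[string]bool)}
	fmt.Println(p.addTx("remote", 1, false)) // false: below the price floor
	fmt.Println(p.addTx("local", 1, true))   // true: local senders go around it
}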

core/txpool/txpool.go

@@ -70,7 +70,7 @@ type TxPool struct {
 	reservations map[common.Address]SubPool // Map with the account to pool reservations
 	reserveLock  sync.Mutex                 // Lock protecting the account reservations
 
-	subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown
+	subs event.SubscriptionScope // Subscription scope to unsubscribe all on shutdown
 	quit chan chan error         // Quit channel to tear down the head updater
 }
@@ -404,7 +404,7 @@ func (p *TxPool) Locals() []common.Address {
 }
 
 // Status returns the known status (unknown/pending/queued) of a transaction
-// identified by their hashes.
+// identified by its hash.
 func (p *TxPool) Status(hash common.Hash) TxStatus {
 	for _, subpool := range p.subpools {
 		if status := subpool.Status(hash); status != TxStatusUnknown {
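The Status lookup shown above simply asks each subpool in turn and returns the first answer that is not unknown. A trimmed-down sketch of that pattern, with a SubPool interface reduced to just what the example needs:

package main

import "fmt"

type TxStatus int

const (
	TxStatusUnknown TxStatus = iota
	TxStatusQueued
	TxStatusPending
)

type SubPool interface {
	Status(hash string) TxStatus
}

// staticPool is a toy subpool backed by a fixed hash -> status map.
type staticPool map[string]TxStatus

func (p staticPool) Status(hash string) TxStatus { return p[hash] }

// status asks each subpool in turn; the first non-unknown answer wins.
func status(subpools []SubPool, hash string) TxStatus {
	for _, sub := range subpools {
		if s := sub.Status(hash); s != TxStatusUnknown {
			return s
		}
	}
	return TxStatusUnknown
}

func main() {
	pools := []SubPool{
		staticPool{"0x01": TxStatusPending}, // e.g. a legacy-style pool
		staticPool{"0x02": TxStatusQueued},  // e.g. a blob-style pool
	}
	fmt.Println(status(pools, "0x02")) // 1 (queued)
	fmt.Println(status(pools, "0x03")) // 0 (unknown)
}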

core/txpool/validation.go

@@ -114,7 +114,7 @@ func ValidateTransaction(tx *types.Transaction, head *types.Header, signer types
 		if sidecar == nil {
 			return fmt.Errorf("missing sidecar in blob transaction")
 		}
-		// Ensure the number of items in the blob transaction and vairous side
+		// Ensure the number of items in the blob transaction and various side
 		// data match up before doing any expensive validations
 		hashes := tx.BlobHashes()
 		if len(hashes) == 0 {
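The check being documented here is the cheap pre-flight: before any expensive KZG verification, the blob hashes and the sidecar's blobs, commitments and proofs must all be present in equal numbers. A hedged sketch with placeholder slice types (not the real types.Transaction accessors):

package main

import "fmt"

// validateCounts rejects a blob transaction whose hash and sidecar item
// counts do not line up, so expensive proof checks are never reached.
func validateCounts(hashes, blobs, commitments, proofs [][]byte) error {
	if len(hashes) == 0 {
		return fmt.Errorf("blobless blob transaction")
	}
	if len(blobs) != len(hashes) || len(commitments) != len(hashes) || len(proofs) != len(hashes) {
		return fmt.Errorf("sidecar item count mismatch: %d hashes, %d blobs, %d commitments, %d proofs",
			len(hashes), len(blobs), len(commitments), len(proofs))
	}
	return nil
}

func main() {
	one := [][]byte{{0x01}}
	fmt.Println(validateCounts(one, one, one, nil)) // mismatch error
	fmt.Println(validateCounts(one, one, one, one)) // <nil>
}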
