mirror of https://github.com/ethereum/go-ethereum
light: remove package light(#28614)
This change removes the package 'light', which is currently unused. (pull/28618/head)
parent
71817f318e
commit
58297e339b
@ -1,531 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package light implements on-demand retrieval capable state and chain objects
|
||||
// for the Ethereum Light Client.
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"math/big" |
||||
"sync" |
||||
"sync/atomic" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/lru" |
||||
"github.com/ethereum/go-ethereum/consensus" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/state" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// Cache sizes (number of entries) for recently accessed block data.
var (
	bodyCacheLimit  = 256 // max cached block bodies (also used for the RLP-encoded body cache)
	blockCacheLimit = 256 // max cached assembled blocks
)
||||
|
||||
// LightChain represents a canonical chain that by default only handles block
// headers, downloading block bodies and receipts on demand through an ODR
// interface. It only does header validation during chain insertion.
type LightChain struct {
	hc            *core.HeaderChain // underlying header-only chain
	indexerConfig *IndexerConfig    // indexer configuration taken from the ODR backend
	chainDb       ethdb.Database    // database holding headers and canonical markers
	engine        consensus.Engine  // consensus engine used for header verification
	odr           OdrBackend        // backend for on-demand retrieval of chain data
	chainFeed     event.Feed        // feed for ChainEvent notifications
	chainSideFeed event.Feed        // feed for ChainSideEvent notifications
	chainHeadFeed event.Feed        // feed for ChainHeadEvent notifications
	scope         event.SubscriptionScope // tracks subscriptions handed out below
	genesisBlock  *types.Block      // cached genesis block
	forker        *core.ForkChoice  // fork-choice rule applied during insertion

	bodyCache    *lru.Cache[common.Hash, *types.Body]  // recently retrieved bodies
	bodyRLPCache *lru.Cache[common.Hash, rlp.RawValue] // recently retrieved bodies, RLP form
	blockCache   *lru.Cache[common.Hash, *types.Block] // recently assembled blocks

	chainmu sync.RWMutex // protects header inserts
	quit    chan struct{}
	wg      sync.WaitGroup

	// Atomic boolean switches:
	stopped       atomic.Bool // whether LightChain is stopped or running
	procInterrupt atomic.Bool // interrupts chain insert
}
||||
|
||||
// NewLightChain returns a fully initialised light chain using information
// available in the database. It initialises the default Ethereum header
// validator.
func NewLightChain(odr OdrBackend, config *params.ChainConfig, engine consensus.Engine) (*LightChain, error) {
	bc := &LightChain{
		chainDb:       odr.Database(),
		indexerConfig: odr.IndexerConfig(),
		odr:           odr,
		quit:          make(chan struct{}),
		bodyCache:     lru.NewCache[common.Hash, *types.Body](bodyCacheLimit),
		bodyRLPCache:  lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit),
		blockCache:    lru.NewCache[common.Hash, *types.Block](blockCacheLimit),
		engine:        engine,
	}
	bc.forker = core.NewForkChoice(bc, nil)
	var err error
	bc.hc, err = core.NewHeaderChain(odr.Database(), config, bc.engine, bc.getProcInterrupt)
	if err != nil {
		return nil, err
	}
	// NoOdr: the genesis block must already be present locally; no network fetch.
	bc.genesisBlock, _ = bc.GetBlockByNumber(NoOdr, 0)
	if bc.genesisBlock == nil {
		return nil, core.ErrNoGenesis
	}
	if err := bc.loadLastState(); err != nil {
		return nil, err
	}
	// Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain
	for hash := range core.BadHashes {
		if header := bc.GetHeaderByHash(hash); header != nil {
			// NOTE(review): the "hash" key logs the parent hash (the rewind
			// target), not the bad header's own hash — presumably intentional,
			// but worth confirming.
			log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash)
			bc.SetHead(header.Number.Uint64() - 1)
			log.Info("Chain rewind was successful, resuming normal operation")
		}
	}
	return bc, nil
}
||||
|
||||
func (lc *LightChain) getProcInterrupt() bool { |
||||
return lc.procInterrupt.Load() |
||||
} |
||||
|
||||
// Odr returns the ODR backend of the chain
|
||||
func (lc *LightChain) Odr() OdrBackend { |
||||
return lc.odr |
||||
} |
||||
|
||||
// HeaderChain returns the underlying header chain.
|
||||
func (lc *LightChain) HeaderChain() *core.HeaderChain { |
||||
return lc.hc |
||||
} |
||||
|
||||
// loadLastState loads the last known chain state from the database. This method
|
||||
// assumes that the chain manager mutex is held.
|
||||
func (lc *LightChain) loadLastState() error { |
||||
if head := rawdb.ReadHeadHeaderHash(lc.chainDb); head == (common.Hash{}) { |
||||
// Corrupt or empty database, init from scratch
|
||||
lc.Reset() |
||||
} else { |
||||
header := lc.GetHeaderByHash(head) |
||||
if header == nil { |
||||
// Corrupt or empty database, init from scratch
|
||||
lc.Reset() |
||||
} else { |
||||
lc.hc.SetCurrentHeader(header) |
||||
} |
||||
} |
||||
// Issue a status log and return
|
||||
header := lc.hc.CurrentHeader() |
||||
headerTd := lc.GetTd(header.Hash(), header.Number.Uint64()) |
||||
log.Info("Loaded most recent local header", "number", header.Number, "hash", header.Hash(), "td", headerTd, "age", common.PrettyAge(time.Unix(int64(header.Time), 0))) |
||||
return nil |
||||
} |
||||
|
||||
// SetHead rewinds the local chain to a new head. Everything above the new
|
||||
// head will be deleted and the new one set.
|
||||
func (lc *LightChain) SetHead(head uint64) error { |
||||
lc.chainmu.Lock() |
||||
defer lc.chainmu.Unlock() |
||||
|
||||
lc.hc.SetHead(head, nil, nil) |
||||
return lc.loadLastState() |
||||
} |
||||
|
||||
// SetHeadWithTimestamp rewinds the local chain to a new head that has at max
|
||||
// the given timestamp. Everything above the new head will be deleted and the
|
||||
// new one set.
|
||||
func (lc *LightChain) SetHeadWithTimestamp(timestamp uint64) error { |
||||
lc.chainmu.Lock() |
||||
defer lc.chainmu.Unlock() |
||||
|
||||
lc.hc.SetHeadWithTimestamp(timestamp, nil, nil) |
||||
return lc.loadLastState() |
||||
} |
||||
|
||||
// GasLimit returns the gas limit of the current HEAD block.
|
||||
func (lc *LightChain) GasLimit() uint64 { |
||||
return lc.hc.CurrentHeader().GasLimit |
||||
} |
||||
|
||||
// Reset purges the entire blockchain, restoring it to its genesis state.
|
||||
func (lc *LightChain) Reset() { |
||||
lc.ResetWithGenesisBlock(lc.genesisBlock) |
||||
} |
||||
|
||||
// ResetWithGenesisBlock purges the entire blockchain, restoring it to the
// specified genesis state.
func (lc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
	// Dump the entire block chain and purge the caches.
	// Must run before taking chainmu below: SetHead acquires it itself.
	lc.SetHead(0)

	lc.chainmu.Lock()
	defer lc.chainmu.Unlock()

	// Prepare the genesis block and reinitialise the chain: write the genesis
	// td, block and head marker atomically in one batch.
	batch := lc.chainDb.NewBatch()
	rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
	rawdb.WriteBlock(batch, genesis)
	rawdb.WriteHeadHeaderHash(batch, genesis.Hash())
	if err := batch.Write(); err != nil {
		log.Crit("Failed to reset genesis block", "err", err)
	}
	lc.genesisBlock = genesis
	lc.hc.SetGenesis(lc.genesisBlock.Header())
	lc.hc.SetCurrentHeader(lc.genesisBlock.Header())
}
||||
|
||||
// Accessors
|
||||
|
||||
// Engine retrieves the light chain's consensus engine.
|
||||
func (lc *LightChain) Engine() consensus.Engine { return lc.engine } |
||||
|
||||
// Genesis returns the genesis block
|
||||
func (lc *LightChain) Genesis() *types.Block { |
||||
return lc.genesisBlock |
||||
} |
||||
|
||||
func (lc *LightChain) StateCache() state.Database { |
||||
panic("not implemented") |
||||
} |
||||
|
||||
// GetBody retrieves a block body (transactions and uncles) from the database
|
||||
// or ODR service by hash, caching it if found.
|
||||
func (lc *LightChain) GetBody(ctx context.Context, hash common.Hash) (*types.Body, error) { |
||||
// Short circuit if the body's already in the cache, retrieve otherwise
|
||||
if cached, ok := lc.bodyCache.Get(hash); ok { |
||||
return cached, nil |
||||
} |
||||
number := lc.hc.GetBlockNumber(hash) |
||||
if number == nil { |
||||
return nil, errors.New("unknown block") |
||||
} |
||||
body, err := GetBody(ctx, lc.odr, hash, *number) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// Cache the found body for next time and return
|
||||
lc.bodyCache.Add(hash, body) |
||||
return body, nil |
||||
} |
||||
|
||||
// GetBodyRLP retrieves a block body in RLP encoding from the database or
|
||||
// ODR service by hash, caching it if found.
|
||||
func (lc *LightChain) GetBodyRLP(ctx context.Context, hash common.Hash) (rlp.RawValue, error) { |
||||
// Short circuit if the body's already in the cache, retrieve otherwise
|
||||
if cached, ok := lc.bodyRLPCache.Get(hash); ok { |
||||
return cached, nil |
||||
} |
||||
number := lc.hc.GetBlockNumber(hash) |
||||
if number == nil { |
||||
return nil, errors.New("unknown block") |
||||
} |
||||
body, err := GetBodyRLP(ctx, lc.odr, hash, *number) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// Cache the found body for next time and return
|
||||
lc.bodyRLPCache.Add(hash, body) |
||||
return body, nil |
||||
} |
||||
|
||||
// HasBlock checks if a block is fully present in the database or not, caching
|
||||
// it if present.
|
||||
func (lc *LightChain) HasBlock(hash common.Hash, number uint64) bool { |
||||
blk, _ := lc.GetBlock(NoOdr, hash, number) |
||||
return blk != nil |
||||
} |
||||
|
||||
// GetBlock retrieves a block from the database or ODR service by hash and number,
|
||||
// caching it if found.
|
||||
func (lc *LightChain) GetBlock(ctx context.Context, hash common.Hash, number uint64) (*types.Block, error) { |
||||
// Short circuit if the block's already in the cache, retrieve otherwise
|
||||
if block, ok := lc.blockCache.Get(hash); ok { |
||||
return block, nil |
||||
} |
||||
block, err := GetBlock(ctx, lc.odr, hash, number) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
// Cache the found block for next time and return
|
||||
lc.blockCache.Add(block.Hash(), block) |
||||
return block, nil |
||||
} |
||||
|
||||
// GetBlockByHash retrieves a block from the database or ODR service by hash,
|
||||
// caching it if found.
|
||||
func (lc *LightChain) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { |
||||
number := lc.hc.GetBlockNumber(hash) |
||||
if number == nil { |
||||
return nil, errors.New("unknown block") |
||||
} |
||||
return lc.GetBlock(ctx, hash, *number) |
||||
} |
||||
|
||||
// GetBlockByNumber retrieves a block from the database or ODR service by
|
||||
// number, caching it (associated with its hash) if found.
|
||||
func (lc *LightChain) GetBlockByNumber(ctx context.Context, number uint64) (*types.Block, error) { |
||||
hash, err := GetCanonicalHash(ctx, lc.odr, number) |
||||
if hash == (common.Hash{}) || err != nil { |
||||
return nil, err |
||||
} |
||||
return lc.GetBlock(ctx, hash, number) |
||||
} |
||||
|
||||
// Stop stops the blockchain service. If any imports are currently in progress
|
||||
// it will abort them using the procInterrupt.
|
||||
func (lc *LightChain) Stop() { |
||||
if !lc.stopped.CompareAndSwap(false, true) { |
||||
return |
||||
} |
||||
close(lc.quit) |
||||
lc.StopInsert() |
||||
lc.wg.Wait() |
||||
log.Info("Blockchain stopped") |
||||
} |
||||
|
||||
// StopInsert interrupts all insertion methods, causing them to return
|
||||
// errInsertionInterrupted as soon as possible. Insertion is permanently disabled after
|
||||
// calling this method.
|
||||
func (lc *LightChain) StopInsert() { |
||||
lc.procInterrupt.Store(true) |
||||
} |
||||
|
||||
// Rollback is designed to remove a chain of links from the database that aren't
// certain enough to be valid.
func (lc *LightChain) Rollback(chain []common.Hash) {
	lc.chainmu.Lock()
	defer lc.chainmu.Unlock()

	// Collect marker updates in one batch; headers themselves are untouched.
	batch := lc.chainDb.NewBatch()
	for i := len(chain) - 1; i >= 0; i-- {
		hash := chain[i]

		// Degrade the chain markers if they are explicitly reverted.
		// In theory we should update all in-memory markers in the
		// last step, however the direction of rollback is from high
		// to low, so it's safe the update in-memory markers directly.
		if head := lc.hc.CurrentHeader(); head.Hash() == hash {
			rawdb.WriteHeadHeaderHash(batch, head.ParentHash)
			lc.hc.SetCurrentHeader(lc.GetHeader(head.ParentHash, head.Number.Uint64()-1))
		}
	}
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rollback light chain", "error", err)
	}
}
||||
|
||||
func (lc *LightChain) InsertHeader(header *types.Header) error { |
||||
// Verify the header first before obtaining the lock
|
||||
headers := []*types.Header{header} |
||||
if _, err := lc.hc.ValidateHeaderChain(headers); err != nil { |
||||
return err |
||||
} |
||||
// Make sure only one thread manipulates the chain at once
|
||||
lc.chainmu.Lock() |
||||
defer lc.chainmu.Unlock() |
||||
|
||||
lc.wg.Add(1) |
||||
defer lc.wg.Done() |
||||
|
||||
_, err := lc.hc.WriteHeaders(headers) |
||||
log.Info("Inserted header", "number", header.Number, "hash", header.Hash()) |
||||
return err |
||||
} |
||||
|
||||
func (lc *LightChain) SetCanonical(header *types.Header) error { |
||||
lc.chainmu.Lock() |
||||
defer lc.chainmu.Unlock() |
||||
|
||||
lc.wg.Add(1) |
||||
defer lc.wg.Done() |
||||
|
||||
if err := lc.hc.Reorg([]*types.Header{header}); err != nil { |
||||
return err |
||||
} |
||||
// Emit events
|
||||
block := types.NewBlockWithHeader(header) |
||||
lc.chainFeed.Send(core.ChainEvent{Block: block, Hash: block.Hash()}) |
||||
lc.chainHeadFeed.Send(core.ChainHeadEvent{Block: block}) |
||||
log.Info("Set the chain head", "number", block.Number(), "hash", block.Hash()) |
||||
return nil |
||||
} |
||||
|
||||
// InsertHeaderChain attempts to insert the given header chain in to the local
|
||||
// chain, possibly creating a reorg. If an error is returned, it will return the
|
||||
// index number of the failing header as well an error describing what went wrong.
|
||||
|
||||
// In the case of a light chain, InsertHeaderChain also creates and posts light
|
||||
// chain events when necessary.
|
||||
func (lc *LightChain) InsertHeaderChain(chain []*types.Header) (int, error) { |
||||
if len(chain) == 0 { |
||||
return 0, nil |
||||
} |
||||
start := time.Now() |
||||
if i, err := lc.hc.ValidateHeaderChain(chain); err != nil { |
||||
return i, err |
||||
} |
||||
|
||||
// Make sure only one thread manipulates the chain at once
|
||||
lc.chainmu.Lock() |
||||
defer lc.chainmu.Unlock() |
||||
|
||||
lc.wg.Add(1) |
||||
defer lc.wg.Done() |
||||
|
||||
status, err := lc.hc.InsertHeaderChain(chain, start, lc.forker) |
||||
if err != nil || len(chain) == 0 { |
||||
return 0, err |
||||
} |
||||
|
||||
// Create chain event for the new head block of this insertion.
|
||||
var ( |
||||
lastHeader = chain[len(chain)-1] |
||||
block = types.NewBlockWithHeader(lastHeader) |
||||
) |
||||
switch status { |
||||
case core.CanonStatTy: |
||||
lc.chainFeed.Send(core.ChainEvent{Block: block, Hash: block.Hash()}) |
||||
lc.chainHeadFeed.Send(core.ChainHeadEvent{Block: block}) |
||||
case core.SideStatTy: |
||||
lc.chainSideFeed.Send(core.ChainSideEvent{Block: block}) |
||||
} |
||||
return 0, err |
||||
} |
||||
|
||||
// CurrentHeader retrieves the current head header of the canonical chain. The
|
||||
// header is retrieved from the HeaderChain's internal cache.
|
||||
func (lc *LightChain) CurrentHeader() *types.Header { |
||||
return lc.hc.CurrentHeader() |
||||
} |
||||
|
||||
// GetTd retrieves a block's total difficulty in the canonical chain from the
|
||||
// database by hash and number, caching it if found.
|
||||
func (lc *LightChain) GetTd(hash common.Hash, number uint64) *big.Int { |
||||
return lc.hc.GetTd(hash, number) |
||||
} |
||||
|
||||
// GetTdOdr retrieves the total difficult from the database or
|
||||
// network by hash and number, caching it (associated with its hash) if found.
|
||||
func (lc *LightChain) GetTdOdr(ctx context.Context, hash common.Hash, number uint64) *big.Int { |
||||
td := lc.GetTd(hash, number) |
||||
if td != nil { |
||||
return td |
||||
} |
||||
td, _ = GetTd(ctx, lc.odr, hash, number) |
||||
return td |
||||
} |
||||
|
||||
// GetHeader retrieves a block header from the database by hash and number,
|
||||
// caching it if found.
|
||||
func (lc *LightChain) GetHeader(hash common.Hash, number uint64) *types.Header { |
||||
return lc.hc.GetHeader(hash, number) |
||||
} |
||||
|
||||
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
|
||||
// found.
|
||||
func (lc *LightChain) GetHeaderByHash(hash common.Hash) *types.Header { |
||||
return lc.hc.GetHeaderByHash(hash) |
||||
} |
||||
|
||||
// HasHeader checks if a block header is present in the database or not, caching
|
||||
// it if present.
|
||||
func (lc *LightChain) HasHeader(hash common.Hash, number uint64) bool { |
||||
return lc.hc.HasHeader(hash, number) |
||||
} |
||||
|
||||
// GetCanonicalHash returns the canonical hash for a given block number
|
||||
func (bc *LightChain) GetCanonicalHash(number uint64) common.Hash { |
||||
return bc.hc.GetCanonicalHash(number) |
||||
} |
||||
|
||||
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
|
||||
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
|
||||
// number of blocks to be individually checked before we reach the canonical chain.
|
||||
//
|
||||
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
|
||||
func (lc *LightChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { |
||||
return lc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) |
||||
} |
||||
|
||||
// GetHeaderByNumber retrieves a block header from the database by number,
|
||||
// caching it (associated with its hash) if found.
|
||||
func (lc *LightChain) GetHeaderByNumber(number uint64) *types.Header { |
||||
return lc.hc.GetHeaderByNumber(number) |
||||
} |
||||
|
||||
// GetHeaderByNumberOdr retrieves a block header from the database or network
|
||||
// by number, caching it (associated with its hash) if found.
|
||||
func (lc *LightChain) GetHeaderByNumberOdr(ctx context.Context, number uint64) (*types.Header, error) { |
||||
if header := lc.hc.GetHeaderByNumber(number); header != nil { |
||||
return header, nil |
||||
} |
||||
return GetHeaderByNumber(ctx, lc.odr, number) |
||||
} |
||||
|
||||
// Config retrieves the header chain's chain configuration.
|
||||
func (lc *LightChain) Config() *params.ChainConfig { return lc.hc.Config() } |
||||
|
||||
// LockChain locks the chain mutex for reading so that multiple canonical hashes can be
|
||||
// retrieved while it is guaranteed that they belong to the same version of the chain
|
||||
func (lc *LightChain) LockChain() { |
||||
lc.chainmu.RLock() |
||||
} |
||||
|
||||
// UnlockChain unlocks the chain mutex
|
||||
func (lc *LightChain) UnlockChain() { |
||||
lc.chainmu.RUnlock() |
||||
} |
||||
|
||||
// SubscribeChainEvent registers a subscription of ChainEvent.
|
||||
func (lc *LightChain) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { |
||||
return lc.scope.Track(lc.chainFeed.Subscribe(ch)) |
||||
} |
||||
|
||||
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
|
||||
func (lc *LightChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { |
||||
return lc.scope.Track(lc.chainHeadFeed.Subscribe(ch)) |
||||
} |
||||
|
||||
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
|
||||
func (lc *LightChain) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription { |
||||
return lc.scope.Track(lc.chainSideFeed.Subscribe(ch)) |
||||
} |
||||
|
||||
// SubscribeLogsEvent implements the interface of filters.Backend
|
||||
// LightChain does not send logs events, so return an empty subscription.
|
||||
func (lc *LightChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { |
||||
return lc.scope.Track(new(event.Feed).Subscribe(ch)) |
||||
} |
||||
|
||||
// SubscribeRemovedLogsEvent implements the interface of filters.Backend
|
||||
// LightChain does not send core.RemovedLogsEvent, so return an empty subscription.
|
||||
func (lc *LightChain) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { |
||||
return lc.scope.Track(new(event.Feed).Subscribe(ch)) |
||||
} |
@ -1,358 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"math/big" |
||||
"testing" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
) |
||||
|
||||
// So we can deterministically seed different blockchains
var (
	canonicalSeed = 1 // seed for the primary (canonical) test chain
	forkSeed      = 2 // seed for forked side chains
)
||||
|
||||
// makeHeaderChain creates a deterministic chain of headers rooted at parent.
|
||||
func makeHeaderChain(parent *types.Header, n int, db ethdb.Database, seed int) []*types.Header { |
||||
blocks, _ := core.GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(parent), ethash.NewFaker(), db, n, func(i int, b *core.BlockGen) { |
||||
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)}) |
||||
}) |
||||
headers := make([]*types.Header, len(blocks)) |
||||
for i, block := range blocks { |
||||
headers[i] = block.Header() |
||||
} |
||||
return headers |
||||
} |
||||
|
||||
// newCanonical creates a chain database and injects a deterministic,
// header-only canonical chain of n headers on top of a test genesis.
// (The code has no "full" mode; only headers are inserted.)
func newCanonical(n int) (ethdb.Database, *LightChain, error) {
	db := rawdb.NewMemoryDatabase()
	gspec := core.Genesis{Config: params.TestChainConfig}
	genesis := gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults))
	blockchain, _ := NewLightChain(&dummyOdr{db: db, indexerConfig: TestClientIndexerConfig}, gspec.Config, ethash.NewFaker())

	// Create and inject the requested chain
	if n == 0 {
		return db, blockchain, nil
	}
	// Header-only chain requested
	headers := makeHeaderChain(genesis.Header(), n, db, canonicalSeed)
	_, err := blockchain.InsertHeaderChain(headers)
	return db, blockchain, err
}
||||
|
||||
// newTestLightChain creates a LightChain that doesn't validate anything.
|
||||
func newTestLightChain() *LightChain { |
||||
db := rawdb.NewMemoryDatabase() |
||||
gspec := &core.Genesis{ |
||||
Difficulty: big.NewInt(1), |
||||
Config: params.TestChainConfig, |
||||
} |
||||
gspec.MustCommit(db, trie.NewDatabase(db, trie.HashDefaults)) |
||||
lc, err := NewLightChain(&dummyOdr{db: db}, gspec.Config, ethash.NewFullFaker()) |
||||
if err != nil { |
||||
panic(err) |
||||
} |
||||
return lc |
||||
} |
||||
|
||||
// Test fork of length N starting from block i
|
||||
func testFork(t *testing.T, LightChain *LightChain, i, n int, comparator func(td1, td2 *big.Int)) { |
||||
// Copy old chain up to #i into a new db
|
||||
db, LightChain2, err := newCanonical(i) |
||||
if err != nil { |
||||
t.Fatal("could not make new canonical in testFork", err) |
||||
} |
||||
// Assert the chains have the same header/block at #i
|
||||
var hash1, hash2 common.Hash |
||||
hash1 = LightChain.GetHeaderByNumber(uint64(i)).Hash() |
||||
hash2 = LightChain2.GetHeaderByNumber(uint64(i)).Hash() |
||||
if hash1 != hash2 { |
||||
t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) |
||||
} |
||||
// Extend the newly created chain
|
||||
headerChainB := makeHeaderChain(LightChain2.CurrentHeader(), n, db, forkSeed) |
||||
if _, err := LightChain2.InsertHeaderChain(headerChainB); err != nil { |
||||
t.Fatalf("failed to insert forking chain: %v", err) |
||||
} |
||||
// Sanity check that the forked chain can be imported into the original
|
||||
var tdPre, tdPost *big.Int |
||||
cur := LightChain.CurrentHeader() |
||||
tdPre = LightChain.GetTd(cur.Hash(), cur.Number.Uint64()) |
||||
if err := testHeaderChainImport(headerChainB, LightChain); err != nil { |
||||
t.Fatalf("failed to import forked header chain: %v", err) |
||||
} |
||||
last := headerChainB[len(headerChainB)-1] |
||||
tdPost = LightChain.GetTd(last.Hash(), last.Number.Uint64()) |
||||
// Compare the total difficulties of the chains
|
||||
comparator(tdPre, tdPost) |
||||
} |
||||
|
||||
// testHeaderChainImport tries to process a chain of headers, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, lightchain *LightChain) error {
	for _, header := range chain {
		// Try and validate the header
		if err := lightchain.engine.VerifyHeader(lightchain.hc, header); err != nil {
			return err
		}
		// Manually insert the header into the database, but don't reorganize (allows subsequent testing)
		lightchain.chainmu.Lock()
		// Stored td = parent td + this header's difficulty.
		rawdb.WriteTd(lightchain.chainDb, header.Hash(), header.Number.Uint64(),
			new(big.Int).Add(header.Difficulty, lightchain.GetTd(header.ParentHash, header.Number.Uint64()-1)))
		rawdb.WriteHeader(lightchain.chainDb, header)
		lightchain.chainmu.Unlock()
	}
	return nil
}
||||
|
||||
// Tests that given a starting canonical chain of a given size, it can be extended
|
||||
// with various length chains.
|
||||
func TestExtendCanonicalHeaders(t *testing.T) { |
||||
length := 5 |
||||
|
||||
// Make first chain starting from genesis
|
||||
_, processor, err := newCanonical(length) |
||||
if err != nil { |
||||
t.Fatalf("failed to make new canonical chain: %v", err) |
||||
} |
||||
// Define the difficulty comparator
|
||||
better := func(td1, td2 *big.Int) { |
||||
if td2.Cmp(td1) <= 0 { |
||||
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) |
||||
} |
||||
} |
||||
// Start fork from current height
|
||||
testFork(t, processor, length, 1, better) |
||||
testFork(t, processor, length, 2, better) |
||||
testFork(t, processor, length, 5, better) |
||||
testFork(t, processor, length, 10, better) |
||||
} |
||||
|
||||
// Tests that given a starting canonical chain of a given size, creating shorter
|
||||
// forks do not take canonical ownership.
|
||||
func TestShorterForkHeaders(t *testing.T) { |
||||
length := 10 |
||||
|
||||
// Make first chain starting from genesis
|
||||
_, processor, err := newCanonical(length) |
||||
if err != nil { |
||||
t.Fatalf("failed to make new canonical chain: %v", err) |
||||
} |
||||
// Define the difficulty comparator
|
||||
worse := func(td1, td2 *big.Int) { |
||||
if td2.Cmp(td1) >= 0 { |
||||
t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1) |
||||
} |
||||
} |
||||
// Sum of numbers must be less than `length` for this to be a shorter fork
|
||||
testFork(t, processor, 0, 3, worse) |
||||
testFork(t, processor, 0, 7, worse) |
||||
testFork(t, processor, 1, 1, worse) |
||||
testFork(t, processor, 1, 7, worse) |
||||
testFork(t, processor, 5, 3, worse) |
||||
testFork(t, processor, 5, 4, worse) |
||||
} |
||||
|
||||
// Tests that given a starting canonical chain of a given size, creating longer
|
||||
// forks do take canonical ownership.
|
||||
func TestLongerForkHeaders(t *testing.T) { |
||||
length := 10 |
||||
|
||||
// Make first chain starting from genesis
|
||||
_, processor, err := newCanonical(length) |
||||
if err != nil { |
||||
t.Fatalf("failed to make new canonical chain: %v", err) |
||||
} |
||||
// Define the difficulty comparator
|
||||
better := func(td1, td2 *big.Int) { |
||||
if td2.Cmp(td1) <= 0 { |
||||
t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) |
||||
} |
||||
} |
||||
// Sum of numbers must be greater than `length` for this to be a longer fork
|
||||
testFork(t, processor, 0, 11, better) |
||||
testFork(t, processor, 0, 15, better) |
||||
testFork(t, processor, 1, 10, better) |
||||
testFork(t, processor, 1, 12, better) |
||||
testFork(t, processor, 5, 6, better) |
||||
testFork(t, processor, 5, 8, better) |
||||
} |
||||
|
||||
// Tests that given a starting canonical chain of a given size, creating equal
|
||||
// forks do take canonical ownership.
|
||||
func TestEqualForkHeaders(t *testing.T) { |
||||
length := 10 |
||||
|
||||
// Make first chain starting from genesis
|
||||
_, processor, err := newCanonical(length) |
||||
if err != nil { |
||||
t.Fatalf("failed to make new canonical chain: %v", err) |
||||
} |
||||
// Define the difficulty comparator
|
||||
equal := func(td1, td2 *big.Int) { |
||||
if td2.Cmp(td1) != 0 { |
||||
t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1) |
||||
} |
||||
} |
||||
// Sum of numbers must be equal to `length` for this to be an equal fork
|
||||
testFork(t, processor, 0, 10, equal) |
||||
testFork(t, processor, 1, 9, equal) |
||||
testFork(t, processor, 2, 8, equal) |
||||
testFork(t, processor, 5, 5, equal) |
||||
testFork(t, processor, 6, 4, equal) |
||||
testFork(t, processor, 9, 1, equal) |
||||
} |
||||
|
||||
// Tests that chains missing links do not get accepted by the processor.
func TestBrokenHeaderChain(t *testing.T) {
	// Make chain starting from genesis
	db, LightChain, err := newCanonical(10)
	if err != nil {
		t.Fatalf("failed to make new canonical chain: %v", err)
	}
	// Create a forked chain, and try to insert with a missing link. Dropping
	// the first forked header ([1:]) leaves a chain whose parent is unknown,
	// which the import must reject.
	chain := makeHeaderChain(LightChain.CurrentHeader(), 5, db, forkSeed)[1:]
	if err := testHeaderChainImport(chain, LightChain); err == nil {
		t.Errorf("broken header chain not reported")
	}
}
||||
|
||||
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header { |
||||
var chain []*types.Header |
||||
for i, difficulty := range d { |
||||
header := &types.Header{ |
||||
Coinbase: common.Address{seed}, |
||||
Number: big.NewInt(int64(i + 1)), |
||||
Difficulty: big.NewInt(int64(difficulty)), |
||||
UncleHash: types.EmptyUncleHash, |
||||
TxHash: types.EmptyTxsHash, |
||||
ReceiptHash: types.EmptyReceiptsHash, |
||||
} |
||||
if i == 0 { |
||||
header.ParentHash = genesis.Hash() |
||||
} else { |
||||
header.ParentHash = chain[i-1].Hash() |
||||
} |
||||
chain = append(chain, types.CopyHeader(header)) |
||||
} |
||||
return chain |
||||
} |
||||
|
||||
// dummyOdr is a minimal OdrBackend stub whose Retrieve always reports success
// without fetching anything; used by tests that must not hit the network.
type dummyOdr struct {
	OdrBackend
	db            ethdb.Database
	indexerConfig *IndexerConfig
}

// Database returns the backing database of the ODR backend.
func (odr *dummyOdr) Database() ethdb.Database {
	return odr.db
}

// Retrieve is a no-op that reports success for every request.
func (odr *dummyOdr) Retrieve(ctx context.Context, req OdrRequest) error {
	return nil
}

// IndexerConfig returns the indexer configuration of the ODR backend.
func (odr *dummyOdr) IndexerConfig() *IndexerConfig {
	return odr.indexerConfig
}
||||
|
||||
// Tests that reorganizing a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) {
	testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10)
}
||||
|
||||
// Tests that reorganizing a short difficult chain after a long easy one
// overwrites the canonical numbers and links in the database.
func TestReorgShortHeaders(t *testing.T) {
	testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11)
}
||||
|
||||
// testReorg inserts the two given difficulty chains into a fresh light chain
// and verifies that the reorg took effect: the canonical number->hash mapping
// must form a connected chain and the head's total difficulty must equal the
// genesis difficulty plus td.
func testReorg(t *testing.T, first, second []int, td int64) {
	bc := newTestLightChain()

	// Insert an easy and a difficult chain afterwards
	bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, first, 11))
	bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, second, 22))
	// Check that the chain is valid number and link wise: walk backwards from
	// the head and require each header's ParentHash to match the hash of its
	// canonical predecessor.
	prev := bc.CurrentHeader()
	for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
		if prev.ParentHash != header.Hash() {
			t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
		}
	}
	// Make sure the chain total difficulty is the correct one
	want := new(big.Int).Add(bc.genesisBlock.Difficulty(), big.NewInt(td))
	if have := bc.GetTd(bc.CurrentHeader().Hash(), bc.CurrentHeader().Number.Uint64()); have.Cmp(want) != 0 {
		t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
	}
}
||||
|
||||
// Tests that the insertion functions detect banned hashes.
|
||||
func TestBadHeaderHashes(t *testing.T) { |
||||
bc := newTestLightChain() |
||||
|
||||
// Create a chain, ban a hash and try to import
|
||||
var err error |
||||
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10) |
||||
core.BadHashes[headers[2].Hash()] = true |
||||
if _, err = bc.InsertHeaderChain(headers); !errors.Is(err, core.ErrBannedHash) { |
||||
t.Errorf("error mismatch: have: %v, want %v", err, core.ErrBannedHash) |
||||
} |
||||
} |
||||
|
||||
// Tests that bad hashes are detected on boot, and the chain rolled back to a
// good state prior to the bad hash.
func TestReorgBadHeaderHashes(t *testing.T) {
	bc := newTestLightChain()

	// Create a chain, import and ban afterwards
	headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)

	if _, err := bc.InsertHeaderChain(headers); err != nil {
		t.Fatalf("failed to import headers: %v", err)
	}
	if bc.CurrentHeader().Hash() != headers[3].Hash() {
		t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
	}
	// Ban the current head; lift the ban again on exit so the shared registry
	// does not affect other tests.
	core.BadHashes[headers[3].Hash()] = true
	defer func() { delete(core.BadHashes, headers[3].Hash()) }()

	// Create a new LightChain and check that it rolled back the state.
	ncm, err := NewLightChain(&dummyOdr{db: bc.chainDb}, params.TestChainConfig, ethash.NewFaker())
	if err != nil {
		t.Fatalf("failed to create new chain manager: %v", err)
	}
	if ncm.CurrentHeader().Hash() != headers[2].Hash() {
		t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
	}
}
@ -1,196 +0,0 @@ |
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"math/big" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/txpool" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// NoOdr is the default context passed to an ODR capable function when the ODR
// service is not required.
var NoOdr = context.Background()

// ErrNoPeers is returned if no peers capable of serving a queued request are available.
var ErrNoPeers = errors.New("no suitable peers available")
||||
|
||||
// OdrBackend is an interface to a backend service that handles ODR retrievals.
type OdrBackend interface {
	// Database returns the local database used to cache retrieved data.
	Database() ethdb.Database
	// ChtIndexer, BloomTrieIndexer and BloomIndexer expose the chain indexers
	// maintaining the CHT, bloom trie and bloom bits sections respectively.
	ChtIndexer() *core.ChainIndexer
	BloomTrieIndexer() *core.ChainIndexer
	BloomIndexer() *core.ChainIndexer
	// Retrieve fetches the data for the given ODR request from the network.
	Retrieve(ctx context.Context, req OdrRequest) error
	// RetrieveTxStatus fetches the status of the transactions in the request.
	RetrieveTxStatus(ctx context.Context, req *TxStatusRequest) error
	// IndexerConfig returns the section-size configuration of the indexers.
	IndexerConfig() *IndexerConfig
}
||||
|
||||
// OdrRequest is an interface for retrieval requests.
type OdrRequest interface {
	// StoreResult stores the retrieved data in the given local database.
	StoreResult(db ethdb.Database)
}
||||
|
||||
// TrieID identifies a state or account storage trie.
type TrieID struct {
	BlockHash      common.Hash // hash of the block the trie belongs to
	BlockNumber    uint64      // number of that block
	StateRoot      common.Hash // root of the block's state trie
	Root           common.Hash // root of the identified trie itself
	AccountAddress []byte      // owning account; nil for the state trie (see StateTrieID)
}
||||
|
||||
// StateTrieID returns a TrieID for a state trie belonging to a certain block
// header. For the state trie, Root and StateRoot coincide and no account
// address is set.
func StateTrieID(header *types.Header) *TrieID {
	return &TrieID{
		BlockHash:      header.Hash(),
		BlockNumber:    header.Number.Uint64(),
		StateRoot:      header.Root,
		Root:           header.Root,
		AccountAddress: nil,
	}
}
||||
|
||||
// StorageTrieID returns a TrieID for a contract storage trie at a given account
// of a given state trie. It also requires the root hash of the trie for
// checking Merkle proofs.
func StorageTrieID(state *TrieID, address common.Address, root common.Hash) *TrieID {
	return &TrieID{
		BlockHash:      state.BlockHash,
		BlockNumber:    state.BlockNumber,
		StateRoot:      state.StateRoot,
		AccountAddress: address[:],
		Root:           root,
	}
}
||||
|
||||
// TrieRequest is the ODR request type for state/storage trie entries.
type TrieRequest struct {
	Id    *TrieID            // identifies which trie the key belongs to
	Key   []byte             // trie key being proven
	Proof *trienode.ProofSet // Merkle proof filled in by the retriever
}

// StoreResult stores the retrieved data in local database.
func (req *TrieRequest) StoreResult(db ethdb.Database) {
	req.Proof.Store(db)
}
||||
|
||||
// CodeRequest is the ODR request type for retrieving contract code.
type CodeRequest struct {
	Id   *TrieID // references storage trie of the account
	Hash common.Hash
	Data []byte // retrieved bytecode, filled in by the retriever
}

// StoreResult stores the retrieved data in local database.
func (req *CodeRequest) StoreResult(db ethdb.Database) {
	rawdb.WriteCode(db, req.Hash, req.Data)
}
||||
|
||||
// BlockRequest is the ODR request type for retrieving block bodies.
type BlockRequest struct {
	Hash   common.Hash
	Number uint64
	Header *types.Header
	Rlp    []byte // RLP-encoded block body, filled in by the retriever
}

// StoreResult stores the retrieved data in local database.
func (req *BlockRequest) StoreResult(db ethdb.Database) {
	rawdb.WriteBodyRLP(db, req.Hash, req.Number, req.Rlp)
}
||||
|
||||
// ReceiptsRequest is the ODR request type for retrieving receipts.
type ReceiptsRequest struct {
	Hash     common.Hash
	Number   uint64
	Header   *types.Header
	Receipts types.Receipts // retrieved receipts, filled in by the retriever
}

// StoreResult stores the retrieved data in local database.
func (req *ReceiptsRequest) StoreResult(db ethdb.Database) {
	rawdb.WriteReceipts(db, req.Hash, req.Number, req.Receipts)
}
||||
|
||||
// ChtRequest is the ODR request type for retrieving header by Canonical Hash Trie.
type ChtRequest struct {
	Config           *IndexerConfig
	ChtNum, BlockNum uint64
	ChtRoot          common.Hash
	Header           *types.Header // retrieved header, filled in by the retriever
	Td               *big.Int      // retrieved total difficulty
	Proof            *trienode.ProofSet
}

// StoreResult stores the retrieved data in local database: header, total
// difficulty and the number->hash canonical mapping.
func (req *ChtRequest) StoreResult(db ethdb.Database) {
	hash, num := req.Header.Hash(), req.Header.Number.Uint64()
	rawdb.WriteHeader(db, req.Header)
	rawdb.WriteTd(db, hash, num, req.Td)
	rawdb.WriteCanonicalHash(db, hash, num)
}
||||
|
||||
// BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure.
type BloomRequest struct {
	OdrRequest
	Config           *IndexerConfig
	BloomTrieNum     uint64
	BitIdx           uint
	SectionIndexList []uint64
	BloomTrieRoot    common.Hash
	BloomBits        [][]byte // one bit vector per requested section, filled in by the retriever
	Proofs           *trienode.ProofSet
}

// StoreResult stores the retrieved data in local database.
func (req *BloomRequest) StoreResult(db ethdb.Database) {
	for i, sectionIdx := range req.SectionIndexList {
		sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*req.Config.BloomTrieSize-1)
		// if we don't have the canonical hash stored for this section head number, we'll still store it under
		// a key with a zero sectionHead. GetBloomBits will look there too if we still don't have the canonical
		// hash. In the unlikely case we've retrieved the section head hash since then, we'll just retrieve the
		// bit vector again from the network.
		rawdb.WriteBloomBits(db, req.BitIdx, sectionIdx, sectionHead, req.BloomBits[i])
	}
}
||||
|
||||
// TxStatus describes the status of a transaction.
type TxStatus struct {
	Status txpool.TxStatus
	Lookup *rawdb.LegacyTxLookupEntry `rlp:"nil"`
	Error  string
}

// TxStatusRequest is the ODR request type for retrieving transaction status.
type TxStatusRequest struct {
	Hashes []common.Hash
	Status []TxStatus // one status per requested hash, filled in by the retriever
}

// StoreResult stores the retrieved data in local database.
// Transaction statuses are transient, so nothing is persisted.
func (req *TxStatusRequest) StoreResult(db ethdb.Database) {}
@ -1,339 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"errors" |
||||
"math/big" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/math" |
||||
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/state" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/core/vm" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
var (
	// testBankKey/testBankAddress fund the account that seeds all test chains.
	testBankKey, _  = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
	testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey)
	testBankFunds   = big.NewInt(1_000_000_000_000_000_000)

	// Two auxiliary accounts that receive transfers in testChainGen.
	acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
	acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
	acc1Addr   = crypto.PubkeyToAddress(acc1Key.PublicKey)
	acc2Addr   = crypto.PubkeyToAddress(acc2Key.PublicKey)

	// testContractCode is the bytecode of the test contract deployed in block 2;
	// testContractAddr is assigned when the creation transaction is built.
	testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056")
	testContractAddr common.Address
)
||||
|
||||
// testOdr is an OdrBackend that serves requests directly from a server-side
// database/state instead of the network. Setting disable makes every
// retrieval fail with ErrOdrDisabled.
type testOdr struct {
	OdrBackend
	indexerConfig *IndexerConfig
	sdb, ldb      ethdb.Database // server-side and light-client-side databases
	serverState   state.Database
	disable       bool
}

// Database returns the light client's local database.
func (odr *testOdr) Database() ethdb.Database {
	return odr.ldb
}

// ErrOdrDisabled is returned by testOdr.Retrieve while the backend is disabled.
var ErrOdrDisabled = errors.New("ODR disabled")
||||
|
||||
// Retrieve answers an ODR request by reading the data from the server-side
// database/state, then stores the result in the light client's database, just
// as a network retrieval would.
func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error {
	if odr.disable {
		return ErrOdrDisabled
	}
	switch req := req.(type) {
	case *BlockRequest:
		number := rawdb.ReadHeaderNumber(odr.sdb, req.Hash)
		if number != nil {
			req.Rlp = rawdb.ReadBodyRLP(odr.sdb, req.Hash, *number)
		}
	case *ReceiptsRequest:
		number := rawdb.ReadHeaderNumber(odr.sdb, req.Hash)
		if number != nil {
			req.Receipts = rawdb.ReadRawReceipts(odr.sdb, req.Hash, *number)
		}
	case *TrieRequest:
		// Open the state or storage trie identified by the request and build
		// a Merkle proof for the requested key.
		var (
			err error
			t   state.Trie
		)
		if len(req.Id.AccountAddress) > 0 {
			t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root, nil)
		} else {
			t, err = odr.serverState.OpenTrie(req.Id.Root)
		}
		if err != nil {
			panic(err)
		}
		nodes := trienode.NewProofSet()
		t.Prove(req.Key, nodes)
		req.Proof = nodes
	case *CodeRequest:
		req.Data = rawdb.ReadCode(odr.sdb, req.Hash)
	}
	// Mirror the retrieved data into the light client's database.
	req.StoreResult(odr.ldb)
	return nil
}

// IndexerConfig returns the indexer configuration of the test backend.
func (odr *testOdr) IndexerConfig() *IndexerConfig {
	return odr.indexerConfig
}
||||
|
||||
// odrTestFn runs one retrieval against either a full node (bc != nil) or a
// light chain (lc != nil) and returns a byte representation of the result so
// the two can be compared.
type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error)

func TestOdrGetBlockLes2(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
||||
|
||||
func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) { |
||||
var block *types.Block |
||||
if bc != nil { |
||||
block = bc.GetBlockByHash(bhash) |
||||
} else { |
||||
block, _ = lc.GetBlockByHash(ctx, bhash) |
||||
} |
||||
if block == nil { |
||||
return nil, nil |
||||
} |
||||
rlp, _ := rlp.EncodeToBytes(block) |
||||
return rlp, nil |
||||
} |
||||
|
||||
func TestOdrGetReceiptsLes2(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }

// odrGetReceipts fetches the receipts of the block with the given hash, either
// directly from the full node's database or via ODR, and returns them
// RLP-encoded. Missing receipts yield (nil, nil).
func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
	var receipts types.Receipts
	if bc != nil {
		if number := rawdb.ReadHeaderNumber(db, bhash); number != nil {
			if header := rawdb.ReadHeader(db, bhash, *number); header != nil {
				receipts = rawdb.ReadReceipts(db, bhash, *number, header.Time, bc.Config())
			}
		}
	} else {
		number := rawdb.ReadHeaderNumber(db, bhash)
		if number != nil {
			receipts, _ = GetBlockReceipts(ctx, lc.Odr(), bhash, *number)
		}
	}
	if receipts == nil {
		return nil, nil
	}
	rlp, _ := rlp.EncodeToBytes(receipts)
	return rlp, nil
}
||||
|
||||
func TestOdrAccountsLes2(t *testing.T) { testChainOdr(t, 1, odrAccounts) }

// odrAccounts reads the balances of a fixed set of accounts (including one
// that never exists) at the given block, through either the full node's state
// or the ODR-backed light state, and returns the concatenated RLP encodings.
func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
	dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
	acc := []common.Address{testBankAddress, acc1Addr, acc2Addr, dummyAddr}

	var st *state.StateDB
	if bc == nil {
		header := lc.GetHeaderByHash(bhash)
		st = NewState(ctx, header, lc.Odr())
	} else {
		header := bc.GetHeaderByHash(bhash)
		st, _ = state.New(header.Root, bc.StateCache(), nil)
	}

	var res []byte
	for _, addr := range acc {
		bal := st.GetBalance(addr)
		rlp, _ := rlp.EncodeToBytes(bal)
		res = append(res, rlp...)
	}
	// st.Error reports any retrieval failure accumulated during the reads.
	return res, st.Error()
}
||||
|
||||
func TestOdrContractCallLes2(t *testing.T) { testChainOdr(t, 1, odrContractCall) }

// odrContractCall performs three read-only calls into the test contract at the
// given block (varying one byte of calldata per iteration), through either the
// full node's state or the ODR-backed light state, and concatenates the return
// values.
func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
	data := common.Hex2Bytes("60CD26850000000000000000000000000000000000000000000000000000000000000000")
	config := params.TestChainConfig

	var res []byte
	for i := 0; i < 3; i++ {
		// Vary the last meaningful calldata byte so each call hits a
		// different contract storage slot.
		data[35] = byte(i)

		var (
			st     *state.StateDB
			header *types.Header
			chain  core.ChainContext
		)
		if bc == nil {
			chain = lc
			header = lc.GetHeaderByHash(bhash)
			st = NewState(ctx, header, lc.Odr())
		} else {
			chain = bc
			header = bc.GetHeaderByHash(bhash)
			st, _ = state.New(header.Root, bc.StateCache(), nil)
		}

		// Perform read-only call. The sender balance is inflated locally so
		// the call never fails on gas funding.
		st.SetBalance(testBankAddress, math.MaxBig256)
		msg := &core.Message{
			From:              testBankAddress,
			To:                &testContractAddr,
			Value:             new(big.Int),
			GasLimit:          1000000,
			GasPrice:          big.NewInt(params.InitialBaseFee),
			GasFeeCap:         big.NewInt(params.InitialBaseFee),
			GasTipCap:         new(big.Int),
			Data:              data,
			SkipAccountChecks: true,
		}
		txContext := core.NewEVMTxContext(msg)
		context := core.NewEVMBlockContext(header, chain, nil)
		vmenv := vm.NewEVM(context, txContext, st, config, vm.Config{NoBaseFee: true})
		gp := new(core.GasPool).AddGas(math.MaxUint64)
		result, _ := core.ApplyMessage(vmenv, msg, gp)
		res = append(res, result.Return()...)
		if st.Error() != nil {
			return res, st.Error()
		}
	}
	return res, nil
}
||||
|
||||
// testChainGen populates the 4-block test chain used by testChainOdr: value
// transfers, a contract creation, contract calls and uncle inclusion, so the
// ODR tests exercise bodies, receipts, state and code retrieval.
func testChainGen(i int, block *core.BlockGen) {
	signer := types.HomesteadSigner{}
	switch i {
	case 0:
		// In block 1, the test bank sends account #1 some ether.
		tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testBankKey)
		block.AddTx(tx)
	case 1:
		// In block 2, the test bank sends some more ether to account #1.
		// acc1Addr passes it on to account #2.
		// acc1Addr creates a test contract.
		tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testBankKey)
		nonce := block.TxNonce(acc1Addr)
		tx2, _ := types.SignTx(types.NewTransaction(nonce, acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key)
		nonce++
		tx3, _ := types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), 1000000, block.BaseFee(), testContractCode), signer, acc1Key)
		// Record the deterministic contract address for the later call tests.
		testContractAddr = crypto.CreateAddress(acc1Addr, nonce)
		block.AddTx(tx1)
		block.AddTx(tx2)
		block.AddTx(tx3)
	case 2:
		// Block 3 is empty but was mined by account #2.
		block.SetCoinbase(acc2Addr)
		block.SetExtra([]byte("yeehaw"))
		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001")
		tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), 100000, block.BaseFee(), data), signer, testBankKey)
		block.AddTx(tx)
	case 3:
		// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).
		b2 := block.PrevBlock(1).Header()
		b2.Extra = []byte("foo")
		block.AddUncle(b2)
		b3 := block.PrevBlock(2).Header()
		b3.Extra = []byte("foo")
		block.AddUncle(b3)
		data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002")
		tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBankAddress), testContractAddr, big.NewInt(0), 100000, block.BaseFee(), data), signer, testBankKey)
		block.AddTx(tx)
	}
}
||||
|
||||
// testChainOdr builds a full test chain on the server side and a header-only
// light chain on the client side, then runs fn against both and compares the
// results in three phases: ODR disabled (only genesis should work), ODR
// enabled (everything should work), and ODR disabled again (everything should
// now be served from the local cache).
func testChainOdr(t *testing.T, protocol int, fn odrTestFn) {
	var (
		sdb   = rawdb.NewMemoryDatabase()
		ldb   = rawdb.NewMemoryDatabase()
		gspec = &core.Genesis{
			Config:  params.TestChainConfig,
			Alloc:   core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
	)
	// Assemble the test environment
	blockchain, _ := core.NewBlockChain(sdb, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
	_, gchain, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, testChainGen)
	if _, err := blockchain.InsertChain(gchain); err != nil {
		t.Fatal(err)
	}

	// Commit the genesis to the light database too, then feed the light chain
	// the headers only — bodies/receipts/state must come through ODR.
	gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults))
	odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig}
	lightchain, err := NewLightChain(odr, gspec.Config, ethash.NewFullFaker())
	if err != nil {
		t.Fatal(err)
	}
	headers := make([]*types.Header, len(gchain))
	for i, block := range gchain {
		headers[i] = block.Header()
	}
	if _, err := lightchain.InsertHeaderChain(headers); err != nil {
		t.Fatal(err)
	}

	// test runs fn on every canonical block; retrievals for blocks below
	// expFail are expected to succeed and match the full node's output.
	test := func(expFail int) {
		for i := uint64(0); i <= blockchain.CurrentHeader().Number.Uint64(); i++ {
			bhash := rawdb.ReadCanonicalHash(sdb, i)
			b1, err := fn(NoOdr, sdb, blockchain, nil, bhash)
			if err != nil {
				t.Fatalf("error in full-node test for block %d: %v", i, err)
			}

			ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
			defer cancel()

			exp := i < uint64(expFail)
			b2, err := fn(ctx, ldb, nil, lightchain, bhash)
			if err != nil && exp {
				t.Errorf("error in ODR test for block %d: %v", i, err)
			}

			eq := bytes.Equal(b1, b2)
			if exp && !eq {
				t.Errorf("ODR test output for block %d doesn't match full node", i)
			}
		}
	}

	// expect retrievals to fail (except genesis block) without a les peer
	t.Log("checking without ODR")
	odr.disable = true
	test(1)

	// expect all retrievals to pass with ODR enabled
	t.Log("checking with ODR")
	odr.disable = false
	test(len(gchain))

	// still expect all retrievals to pass, now data should be cached locally
	t.Log("checking without ODR, should be cached")
	odr.disable = true
	test(len(gchain))
}
@ -1,275 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"math/big" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/consensus/misc/eip4844" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/txpool" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
) |
||||
|
||||
// errNonCanonicalHash is returned if the requested chain data doesn't belong
// to the canonical chain. ODR can only retrieve the canonical chain data covered
// by the CHT or Bloom trie for verification.
var errNonCanonicalHash = errors.New("hash is not currently canonical")
||||
|
||||
// GetHeaderByNumber retrieves the canonical block header corresponding to the
// given number. The returned header is proven by local CHT.
func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) {
	// Try to find it in the local database first.
	db := odr.Database()
	hash := rawdb.ReadCanonicalHash(db, number)

	// If there is a canonical hash, there should have a header too.
	// But if it's pruned, re-fetch from network again.
	if (hash != common.Hash{}) {
		if header := rawdb.ReadHeader(db, hash, number); header != nil {
			return header, nil
		}
	}
	// Retrieve the header via ODR, ensure the requested header is covered
	// by local trusted CHT.
	chts, _, chtHead := odr.ChtIndexer().Sections()
	if number >= chts*odr.IndexerConfig().ChtSize {
		return nil, errNoTrustedCht
	}
	// Request against the latest complete CHT section (chts-1).
	r := &ChtRequest{
		ChtRoot: GetChtRoot(db, chts-1, chtHead),
		ChtNum:  chts - 1,
		BlockNum: number,
		Config:  odr.IndexerConfig(),
	}
	if err := odr.Retrieve(ctx, r); err != nil {
		return nil, err
	}
	return r.Header, nil
}
||||
|
||||
// GetCanonicalHash retrieves the canonical block hash corresponding to the number.
func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) {
	hash := rawdb.ReadCanonicalHash(odr.Database(), number)
	if hash != (common.Hash{}) {
		return hash, nil
	}
	// Fetch the header; its retrieval also persists the canonical mapping.
	header, err := GetHeaderByNumber(ctx, odr, number)
	if err != nil {
		return common.Hash{}, err
	}
	// number -> canonical mapping already be stored in db, get it.
	return header.Hash(), nil
}
||||
|
||||
// GetTd retrieves the total difficulty corresponding to the number and hash.
func GetTd(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*big.Int, error) {
	td := rawdb.ReadTd(odr.Database(), hash, number)
	if td != nil {
		return td, nil
	}
	// Fetch the header via ODR; a successful CHT retrieval also stores the td.
	header, err := GetHeaderByNumber(ctx, odr, number)
	if err != nil {
		return nil, err
	}
	if header.Hash() != hash {
		return nil, errNonCanonicalHash
	}
	// <hash, number> -> td mapping already be stored in db, get it.
	return rawdb.ReadTd(odr.Database(), hash, number), nil
}
||||
|
||||
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (rlp.RawValue, error) {
	if data := rawdb.ReadBodyRLP(odr.Database(), hash, number); data != nil {
		return data, nil
	}
	// Retrieve the block header first and pass it for verification.
	header, err := GetHeaderByNumber(ctx, odr, number)
	if err != nil {
		return nil, errNoHeader
	}
	if header.Hash() != hash {
		return nil, errNonCanonicalHash
	}
	r := &BlockRequest{Hash: hash, Number: number, Header: header}
	if err := odr.Retrieve(ctx, r); err != nil {
		return nil, err
	}
	return r.Rlp, nil
}
||||
|
||||
// GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash.
func GetBody(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Body, error) {
	data, err := GetBodyRLP(ctx, odr, hash, number)
	if err != nil {
		return nil, err
	}
	body := new(types.Body)
	if err := rlp.DecodeBytes(data, body); err != nil {
		return nil, err
	}
	return body, nil
}
||||
|
||||
// GetBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body.
func GetBlock(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (*types.Block, error) {
	// Retrieve the block header and body contents
	header, err := GetHeaderByNumber(ctx, odr, number)
	if err != nil {
		return nil, errNoHeader
	}
	body, err := GetBody(ctx, odr, hash, number)
	if err != nil {
		return nil, err
	}
	// Reassemble the block and return
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles), nil
}
||||
|
||||
// GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash. Receipts will be filled in with context data.
func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) (types.Receipts, error) {
	// Assume receipts are already stored locally and attempt to retrieve.
	receipts := rawdb.ReadRawReceipts(odr.Database(), hash, number)
	if receipts == nil {
		// Not cached: verify the hash is canonical via the header, then fetch
		// the receipts over the ODR backend.
		header, err := GetHeaderByNumber(ctx, odr, number)
		if err != nil {
			return nil, errNoHeader
		}
		if header.Hash() != hash {
			return nil, errNonCanonicalHash
		}
		r := &ReceiptsRequest{Hash: hash, Number: number, Header: header}
		if err := odr.Retrieve(ctx, r); err != nil {
			return nil, err
		}
		receipts = r.Receipts
	}
	// If the receipts are incomplete, fill the derived fields.
	// A zero TxHash on the first receipt marks raw, underived receipts.
	if len(receipts) > 0 && receipts[0].TxHash == (common.Hash{}) {
		block, err := GetBlock(ctx, odr, hash, number)
		if err != nil {
			return nil, err
		}
		genesis := rawdb.ReadCanonicalHash(odr.Database(), 0)
		config := rawdb.ReadChainConfig(odr.Database(), genesis)

		// Blob gas price only exists post-Cancun; nil otherwise.
		// NOTE(review): eip4844 is not visible in this file's import block —
		// confirm consensus/misc/eip4844 is imported.
		var blobGasPrice *big.Int
		excessBlobGas := block.ExcessBlobGas()
		if excessBlobGas != nil {
			blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
		}

		if err := receipts.DeriveFields(config, block.Hash(), block.NumberU64(), block.Time(), block.BaseFee(), blobGasPrice, block.Transactions()); err != nil {
			return nil, err
		}
		// Persist the fully derived receipts so future reads skip this work.
		rawdb.WriteReceipts(odr.Database(), hash, number, receipts)
	}
	return receipts, nil
}
||||
|
||||
// GetBlockLogs retrieves the logs generated by the transactions included in a
|
||||
// block given by its hash. Logs will be filled in with context data.
|
||||
func GetBlockLogs(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) ([][]*types.Log, error) { |
||||
receipts, err := GetBlockReceipts(ctx, odr, hash, number) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
logs := make([][]*types.Log, len(receipts)) |
||||
for i, receipt := range receipts { |
||||
logs[i] = receipt.Logs |
||||
} |
||||
return logs, nil |
||||
} |
||||
|
||||
// GetBloomBits retrieves a batch of compressed bloomBits vectors belonging to
// the given bit index and section indexes.
func GetBloomBits(ctx context.Context, odr OdrBackend, bit uint, sections []uint64) ([][]byte, error) {
	var (
		reqIndex    []int    // positions in `result` still needing data
		reqSections []uint64 // section indexes to fetch via ODR
		db          = odr.Database()
		result      = make([][]byte, len(sections))
	)
	blooms, _, sectionHead := odr.BloomTrieIndexer().Sections()
	for i, section := range sections {
		// Note: this deliberately shadows the outer sectionHead with the
		// per-section canonical head hash.
		sectionHead := rawdb.ReadCanonicalHash(db, (section+1)*odr.IndexerConfig().BloomSize-1)
		// If we don't have the canonical hash stored for this section head number,
		// we'll still look for an entry with a zero sectionHead (we store it with
		// zero section head too if we don't know it at the time of the retrieval)
		if bloomBits, _ := rawdb.ReadBloomBits(db, bit, section, sectionHead); len(bloomBits) != 0 {
			result[i] = bloomBits
			continue
		}
		// TODO(rjl493456442) Convert sectionIndex to BloomTrie relative index
		if section >= blooms {
			return nil, errNoTrustedBloomTrie
		}
		reqSections = append(reqSections, section)
		reqIndex = append(reqIndex, i)
	}
	// Find all bloombits in database, nothing to query via odr, return.
	if reqSections == nil {
		return result, nil
	}
	// Send odr request to retrieve missing bloombits.
	r := &BloomRequest{
		BloomTrieRoot:    GetBloomTrieRoot(db, blooms-1, sectionHead),
		BloomTrieNum:     blooms - 1,
		BitIdx:           bit,
		SectionIndexList: reqSections,
		Config:           odr.IndexerConfig(),
	}
	if err := odr.Retrieve(ctx, r); err != nil {
		return nil, err
	}
	// Splice the fetched vectors back into their original positions.
	for i, idx := range reqIndex {
		result[idx] = r.BloomBits[i]
	}
	return result, nil
}
||||
|
||||
// GetTransaction retrieves a canonical transaction by hash and also returns
// its position in the chain. There is no guarantee in the LES protocol that
// the mined transaction will be retrieved back for sure because of different
// reasons(the transaction is unindexed, the malicious server doesn't reply it
// deliberately, etc). Therefore, unretrieved transactions will receive a certain
// number of retries, thus giving a weak guarantee.
//
// Returns (tx, blockHash, blockNumber, txIndex, error); the first four values
// are zero when the transaction cannot be located. Note that several failure
// paths intentionally return a nil error (err may be nil when the status is
// simply not "included").
func GetTransaction(ctx context.Context, odr OdrBackend, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {
	r := &TxStatusRequest{Hashes: []common.Hash{txHash}}
	if err := odr.RetrieveTxStatus(ctx, r); err != nil || r.Status[0].Status != txpool.TxStatusIncluded {
		return nil, common.Hash{}, 0, 0, err
	}
	pos := r.Status[0].Lookup
	// first ensure that we have the header, otherwise block body retrieval will fail
	// also verify if this is a canonical block by getting the header by number and checking its hash
	if header, err := GetHeaderByNumber(ctx, odr, pos.BlockIndex); err != nil || header.Hash() != pos.BlockHash {
		return nil, common.Hash{}, 0, 0, err
	}
	// Fetch the body and double-check the transaction really sits at the
	// reported index (defends against a lying server).
	body, err := GetBody(ctx, odr, pos.BlockHash, pos.BlockIndex)
	if err != nil || uint64(len(body.Transactions)) <= pos.Index || body.Transactions[pos.Index].Hash() != txHash {
		return nil, common.Hash{}, 0, 0, err
	}
	return body.Transactions[pos.Index], pos.BlockHash, pos.BlockIndex, pos.Index, nil
}
@ -1,538 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
"math/big" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/common/bitutil" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
// IndexerConfig includes a set of configs for chain indexers.
type IndexerConfig struct {
	// The block frequency for creating CHTs.
	ChtSize uint64

	// The number of confirmations needed to generate/accept a canonical hash help trie.
	ChtConfirms uint64

	// The block frequency for creating new bloom bits.
	BloomSize uint64

	// The number of confirmations needed before a bloom section is considered
	// probably final and its rotated bits are calculated.
	BloomConfirms uint64

	// The block frequency for creating BloomTrie.
	BloomTrieSize uint64

	// The number of confirmations needed to generate/accept a bloom trie.
	BloomTrieConfirms uint64
}
||||
|
||||
var (
	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
	DefaultServerIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieProcessConfirmations,
		BloomSize:         params.BloomBitsBlocks,
		BloomConfirms:     params.BloomConfirms,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieProcessConfirmations,
	}
	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
	// Clients use larger confirmation counts since they cannot verify locally.
	DefaultClientIndexerConfig = &IndexerConfig{
		ChtSize:           params.CHTFrequency,
		ChtConfirms:       params.HelperTrieConfirmations,
		BloomSize:         params.BloomBitsBlocksClient,
		BloomConfirms:     params.HelperTrieConfirmations,
		BloomTrieSize:     params.BloomTrieFrequency,
		BloomTrieConfirms: params.HelperTrieConfirmations,
	}
	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
	// Small section sizes keep test chains short.
	TestServerIndexerConfig = &IndexerConfig{
		ChtSize:           128,
		ChtConfirms:       1,
		BloomSize:         16,
		BloomConfirms:     1,
		BloomTrieSize:     128,
		BloomTrieConfirms: 1,
	}
	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
	TestClientIndexerConfig = &IndexerConfig{
		ChtSize:           128,
		ChtConfirms:       8,
		BloomSize:         128,
		BloomConfirms:     8,
		BloomTrieSize:     128,
		BloomTrieConfirms: 8,
	}
)
||||
|
||||
// Sentinel errors returned by the helper-trie retrieval paths.
var (
	errNoTrustedCht       = errors.New("no trusted canonical hash trie")
	errNoTrustedBloomTrie = errors.New("no trusted bloom trie")
	errNoHeader           = errors.New("header not found")
)
||||
|
||||
// ChtNode structures are stored in the Canonical Hash Trie in an RLP encoded format
type ChtNode struct {
	Hash common.Hash // canonical block hash at the node's block number
	Td   *big.Int    // total difficulty up to and including that block
}
||||
|
||||
// GetChtRoot reads the CHT root associated to the given section from the database
|
||||
func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash { |
||||
var encNumber [8]byte |
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx) |
||||
data, _ := db.Get(append(append(rawdb.ChtPrefix, encNumber[:]...), sectionHead.Bytes()...)) |
||||
return common.BytesToHash(data) |
||||
} |
||||
|
||||
// StoreChtRoot writes the CHT root associated to the given section into the database
|
||||
func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) { |
||||
var encNumber [8]byte |
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx) |
||||
db.Put(append(append(rawdb.ChtPrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes()) |
||||
} |
||||
|
||||
// ChtIndexerBackend implements core.ChainIndexerBackend.
type ChtIndexerBackend struct {
	disablePruning       bool           // whether to keep historical trie nodes
	diskdb, trieTable    ethdb.Database // raw database and CHT-prefixed table view
	odr                  OdrBackend     // backend to fetch missing trie nodes (nil on servers)
	triedb               *trie.Database
	section, sectionSize uint64      // current section being processed and its block count
	lastHash             common.Hash // hash of the last processed header
	trie                 *trie.Trie  // CHT under construction
	originRoot           common.Hash // root the current section started from
}
||||
|
||||
// NewChtIndexer creates a Cht chain indexer
|
||||
func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, disablePruning bool) *core.ChainIndexer { |
||||
trieTable := rawdb.NewTable(db, string(rawdb.ChtTablePrefix)) |
||||
backend := &ChtIndexerBackend{ |
||||
diskdb: db, |
||||
odr: odr, |
||||
trieTable: trieTable, |
||||
triedb: trie.NewDatabase(trieTable, trie.HashDefaults), |
||||
sectionSize: size, |
||||
disablePruning: disablePruning, |
||||
} |
||||
return core.NewChainIndexer(db, rawdb.NewTable(db, string(rawdb.ChtIndexTablePrefix)), backend, size, confirms, time.Millisecond*100, "cht") |
||||
} |
||||
|
||||
// fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
func (c *ChtIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	batch := c.trieTable.NewBatch()
	// Request the last block of the previous section, proving the trie path.
	r := &ChtRequest{ChtRoot: root, ChtNum: section - 1, BlockNum: section*c.sectionSize - 1, Config: c.odr.IndexerConfig()}
	for {
		err := c.odr.Retrieve(ctx, r)
		switch err {
		case nil:
			// Persist the proven trie nodes and finish.
			r.Proof.Store(batch)
			return batch.Write()
		case ErrNoPeers:
			// if there are no peers to serve, retry later
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(time.Second * 10):
				// stay in the loop and try again
			}
		default:
			return err
		}
	}
}
||||
|
||||
// Reset implements core.ChainIndexerBackend
|
||||
func (c *ChtIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { |
||||
root := types.EmptyRootHash |
||||
if section > 0 { |
||||
root = GetChtRoot(c.diskdb, section-1, lastSectionHead) |
||||
} |
||||
var err error |
||||
c.trie, err = trie.New(trie.TrieID(root), c.triedb) |
||||
|
||||
if err != nil && c.odr != nil { |
||||
err = c.fetchMissingNodes(ctx, section, root) |
||||
if err == nil { |
||||
c.trie, err = trie.New(trie.TrieID(root), c.triedb) |
||||
} |
||||
} |
||||
c.section = section |
||||
c.originRoot = root |
||||
return err |
||||
} |
||||
|
||||
// Process implements core.ChainIndexerBackend
|
||||
func (c *ChtIndexerBackend) Process(ctx context.Context, header *types.Header) error { |
||||
hash, num := header.Hash(), header.Number.Uint64() |
||||
c.lastHash = hash |
||||
|
||||
td := rawdb.ReadTd(c.diskdb, hash, num) |
||||
if td == nil { |
||||
panic(nil) |
||||
} |
||||
var encNumber [8]byte |
||||
binary.BigEndian.PutUint64(encNumber[:], num) |
||||
data, _ := rlp.EncodeToBytes(ChtNode{hash, td}) |
||||
return c.trie.Update(encNumber[:], data) |
||||
} |
||||
|
||||
// Commit implements core.ChainIndexerBackend. It finalizes the section's CHT,
// flushes it to disk, optionally prunes stale trie nodes, and records the new
// section root.
func (c *ChtIndexerBackend) Commit() error {
	root, nodes, err := c.trie.Commit(false)
	if err != nil {
		return err
	}
	// Commit trie changes into trie database in case it's not nil.
	if nodes != nil {
		if err := c.triedb.Update(root, c.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return err
		}
		if err := c.triedb.Commit(root, false); err != nil {
			return err
		}
	}
	// Re-create trie with newly generated root and updated database.
	c.trie, err = trie.New(trie.TrieID(root), c.triedb)
	if err != nil {
		return err
	}
	// Pruning historical trie nodes if necessary.
	if !c.disablePruning {
		it := c.trieTable.NewIterator(nil, nil)
		defer it.Release()

		var (
			deleted int
			batch   = c.trieTable.NewBatch()
			t       = time.Now()
		)
		// Collect the node hashes that belong to the just-committed trie;
		// everything else in the table is stale and can be deleted.
		hashes := make(map[common.Hash]struct{})
		if nodes != nil {
			for _, hash := range nodes.Hashes() {
				hashes[hash] = struct{}{}
			}
		}
		for it.Next() {
			// Strip the table prefix; only hash-length keys are trie nodes.
			trimmed := bytes.TrimPrefix(it.Key(), rawdb.ChtTablePrefix)
			if len(trimmed) == common.HashLength {
				if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
					batch.Delete(trimmed)
					deleted += 1
				}
			}
		}
		if err := batch.Write(); err != nil {
			return err
		}
		log.Debug("Prune historical CHT trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
	}
	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
	return nil
}
||||
|
||||
// Prune implements core.ChainIndexerBackend which deletes all chain data
// (except hash<->number mappings) older than the specified threshold.
func (c *ChtIndexerBackend) Prune(threshold uint64) error {
	// Short circuit if the light pruning is disabled.
	if c.disablePruning {
		return nil
	}
	t := time.Now()
	// Always keep genesis header in database.
	start, end := uint64(1), (threshold+1)*c.sectionSize

	var batch = c.diskdb.NewBatch()
	// Iterate canonical hashes in chunks of 10240 until the range is drained.
	for {
		numbers, hashes := rawdb.ReadAllCanonicalHashes(c.diskdb, start, end, 10240)
		if len(numbers) == 0 {
			break
		}
		for i := 0; i < len(numbers); i++ {
			// Keep hash<->number mapping in database otherwise the hash based
			// API(e.g. GetReceipt, GetLogs) will be broken.
			//
			// Storage size wise, the size of a mapping is ~41bytes. For one
			// section is about 1.3MB which is acceptable.
			//
			// In order to totally get rid of this index, we need an additional
			// flag to specify how many historical data light client can serve.
			rawdb.DeleteCanonicalHash(batch, numbers[i])
			rawdb.DeleteBlockWithoutNumber(batch, hashes[i], numbers[i])
		}
		// Flush periodically to bound batch memory usage.
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		start = numbers[len(numbers)-1] + 1
	}
	if err := batch.Write(); err != nil {
		return err
	}
	log.Debug("Prune history headers", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(t)))
	return nil
}
||||
|
||||
// GetBloomTrieRoot reads the BloomTrie root associated to the given section from the database
|
||||
func GetBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash { |
||||
var encNumber [8]byte |
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx) |
||||
data, _ := db.Get(append(append(rawdb.BloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...)) |
||||
return common.BytesToHash(data) |
||||
} |
||||
|
||||
// StoreBloomTrieRoot writes the BloomTrie root associated to the given section into the database
|
||||
func StoreBloomTrieRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) { |
||||
var encNumber [8]byte |
||||
binary.BigEndian.PutUint64(encNumber[:], sectionIdx) |
||||
db.Put(append(append(rawdb.BloomTriePrefix, encNumber[:]...), sectionHead.Bytes()...), root.Bytes()) |
||||
} |
||||
|
||||
// BloomTrieIndexerBackend implements core.ChainIndexerBackend
type BloomTrieIndexerBackend struct {
	disablePruning    bool           // whether to keep historical trie nodes / bloombits
	diskdb, trieTable ethdb.Database // raw database and BloomTrie-prefixed table view
	triedb            *trie.Database
	odr               OdrBackend // backend to fetch missing trie nodes (nil on servers)
	section           uint64     // bloom trie section currently being processed
	parentSize        uint64     // size of the underlying bloombits sections
	size              uint64     // size of one bloom trie section, in blocks
	bloomTrieRatio    uint64     // size / parentSize: bloombits sections per trie section
	trie              *trie.Trie
	originRoot        common.Hash   // root the current section started from
	sectionHeads      []common.Hash // head hash of each child bloombits section
}
||||
|
||||
// NewBloomTrieIndexer creates a BloomTrie chain indexer
//
// parentSize is the bloombits section size, size the bloom trie section size;
// size is expected to be a positive multiple of parentSize (the ratio below
// would otherwise be zero or truncated).
func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uint64, disablePruning bool) *core.ChainIndexer {
	trieTable := rawdb.NewTable(db, string(rawdb.BloomTrieTablePrefix))
	backend := &BloomTrieIndexerBackend{
		diskdb:         db,
		odr:            odr,
		trieTable:      trieTable,
		triedb:         trie.NewDatabase(trieTable, trie.HashDefaults),
		parentSize:     parentSize,
		size:           size,
		disablePruning: disablePruning,
	}
	backend.bloomTrieRatio = size / parentSize
	backend.sectionHeads = make([]common.Hash, backend.bloomTrieRatio)
	return core.NewChainIndexer(db, rawdb.NewTable(db, string(rawdb.BloomTrieIndexPrefix)), backend, size, 0, time.Millisecond*100, "bloomtrie")
}
||||
|
||||
// fetchMissingNodes tries to retrieve the last entries of the latest trusted bloom trie from the
// ODR backend in order to be able to add new entries and calculate subsequent root hashes
//
// One retrieval is made per bloom bit index (types.BloomBitLength in total),
// fanned out over 20 worker goroutines.
func (b *BloomTrieIndexerBackend) fetchMissingNodes(ctx context.Context, section uint64, root common.Hash) error {
	indexCh := make(chan uint, types.BloomBitLength)
	type res struct {
		nodes *trienode.ProofSet
		err   error
	}
	resCh := make(chan res, types.BloomBitLength)
	// Spawn a fixed pool of 20 workers; each drains bit indexes from indexCh.
	for i := 0; i < 20; i++ {
		go func() {
			for bitIndex := range indexCh {
				r := &BloomRequest{BloomTrieRoot: root, BloomTrieNum: section - 1, BitIdx: bitIndex, SectionIndexList: []uint64{section - 1}, Config: b.odr.IndexerConfig()}
				for {
					if err := b.odr.Retrieve(ctx, r); err == ErrNoPeers {
						// if there are no peers to serve, retry later
						select {
						case <-ctx.Done():
							resCh <- res{nil, ctx.Err()}
							return
						case <-time.After(time.Second * 10):
							// stay in the loop and try again
						}
					} else {
						resCh <- res{r.Proofs, err}
						break
					}
				}
			}
		}()
	}
	// Feed every bit index, then close so workers terminate when drained.
	for i := uint(0); i < types.BloomBitLength; i++ {
		indexCh <- i
	}
	close(indexCh)
	// Collect exactly one result per bit; the first error aborts.
	// Note: remaining workers may keep retrying until ctx is cancelled.
	batch := b.trieTable.NewBatch()
	for i := uint(0); i < types.BloomBitLength; i++ {
		res := <-resCh
		if res.err != nil {
			return res.err
		}
		res.nodes.Store(batch)
	}
	return batch.Write()
}
||||
|
||||
// Reset implements core.ChainIndexerBackend
|
||||
func (b *BloomTrieIndexerBackend) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { |
||||
root := types.EmptyRootHash |
||||
if section > 0 { |
||||
root = GetBloomTrieRoot(b.diskdb, section-1, lastSectionHead) |
||||
} |
||||
var err error |
||||
b.trie, err = trie.New(trie.TrieID(root), b.triedb) |
||||
if err != nil && b.odr != nil { |
||||
err = b.fetchMissingNodes(ctx, section, root) |
||||
if err == nil { |
||||
b.trie, err = trie.New(trie.TrieID(root), b.triedb) |
||||
} |
||||
} |
||||
b.section = section |
||||
b.originRoot = root |
||||
return err |
||||
} |
||||
|
||||
// Process implements core.ChainIndexerBackend
|
||||
func (b *BloomTrieIndexerBackend) Process(ctx context.Context, header *types.Header) error { |
||||
num := header.Number.Uint64() - b.section*b.size |
||||
if (num+1)%b.parentSize == 0 { |
||||
b.sectionHeads[num/b.parentSize] = header.Hash() |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Commit implements core.ChainIndexerBackend. It recompresses the section's
// bloombits per bit index into the bloom trie, flushes the trie, optionally
// prunes stale nodes, and records the new section root.
func (b *BloomTrieIndexerBackend) Commit() error {
	var compSize, decompSize uint64 // statistics for the compression-ratio log

	for i := uint(0); i < types.BloomBitLength; i++ {
		// Trie key: 2-byte bit index ++ 8-byte section number, big-endian.
		var encKey [10]byte
		binary.BigEndian.PutUint16(encKey[0:2], uint16(i))
		binary.BigEndian.PutUint64(encKey[2:10], b.section)
		// Concatenate the decompressed bit vectors of every child section.
		var decomp []byte
		for j := uint64(0); j < b.bloomTrieRatio; j++ {
			data, err := rawdb.ReadBloomBits(b.diskdb, i, b.section*b.bloomTrieRatio+j, b.sectionHeads[j])
			if err != nil {
				return err
			}
			decompData, err2 := bitutil.DecompressBytes(data, int(b.parentSize/8))
			if err2 != nil {
				return err2
			}
			decomp = append(decomp, decompData...)
		}
		comp := bitutil.CompressBytes(decomp)

		decompSize += uint64(len(decomp))
		compSize += uint64(len(comp))

		// Store non-empty vectors; delete the key when the vector is all zero
		// so empty entries don't bloat the trie.
		var terr error
		if len(comp) > 0 {
			terr = b.trie.Update(encKey[:], comp)
		} else {
			terr = b.trie.Delete(encKey[:])
		}
		if terr != nil {
			return terr
		}
	}
	root, nodes, err := b.trie.Commit(false)
	if err != nil {
		return err
	}
	// Commit trie changes into trie database in case it's not nil.
	if nodes != nil {
		if err := b.triedb.Update(root, b.originRoot, 0, trienode.NewWithNodeSet(nodes), nil); err != nil {
			return err
		}
		if err := b.triedb.Commit(root, false); err != nil {
			return err
		}
	}
	// Re-create trie with newly generated root and updated database.
	b.trie, err = trie.New(trie.TrieID(root), b.triedb)
	if err != nil {
		return err
	}
	// Pruning historical trie nodes if necessary.
	if !b.disablePruning {
		it := b.trieTable.NewIterator(nil, nil)
		defer it.Release()

		var (
			deleted int
			batch   = b.trieTable.NewBatch()
			t       = time.Now()
		)
		// Collect the node hashes of the just-committed trie; any other
		// hash-length key in the table is stale and gets deleted.
		hashes := make(map[common.Hash]struct{})
		if nodes != nil {
			for _, hash := range nodes.Hashes() {
				hashes[hash] = struct{}{}
			}
		}
		for it.Next() {
			trimmed := bytes.TrimPrefix(it.Key(), rawdb.BloomTrieTablePrefix)
			if len(trimmed) == common.HashLength {
				if _, ok := hashes[common.BytesToHash(trimmed)]; !ok {
					batch.Delete(trimmed)
					deleted += 1
				}
			}
		}
		if err := batch.Write(); err != nil {
			return err
		}
		log.Debug("Prune historical bloom trie nodes", "deleted", deleted, "remaining", len(hashes), "elapsed", common.PrettyDuration(time.Since(t)))
	}
	sectionHead := b.sectionHeads[b.bloomTrieRatio-1]
	StoreBloomTrieRoot(b.diskdb, b.section, sectionHead, root)
	log.Info("Storing bloom trie", "section", b.section, "head", fmt.Sprintf("%064x", sectionHead), "root", fmt.Sprintf("%064x", root), "compression", float64(compSize)/float64(decompSize))

	return nil
}
||||
|
||||
// Prune implements core.ChainIndexerBackend which deletes all
|
||||
// bloombits which older than the specified threshold.
|
||||
func (b *BloomTrieIndexerBackend) Prune(threshold uint64) error { |
||||
// Short circuit if the light pruning is disabled.
|
||||
if b.disablePruning { |
||||
return nil |
||||
} |
||||
start := time.Now() |
||||
for i := uint(0); i < types.BloomBitLength; i++ { |
||||
rawdb.DeleteBloombits(b.diskdb, i, 0, threshold*b.bloomTrieRatio+b.bloomTrieRatio) |
||||
} |
||||
log.Debug("Prune history bloombits", "threshold", threshold, "elapsed", common.PrettyDuration(time.Since(start))) |
||||
return nil |
||||
} |
@ -1,319 +0,0 @@ |
||||
// Copyright 2015 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/state" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/crypto" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/rlp" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
"github.com/ethereum/go-ethereum/trie/trienode" |
||||
) |
||||
|
||||
var (
	// sha3Nil is the Keccak256 hash of empty input, used to short-circuit
	// code lookups for accounts without code.
	sha3Nil = crypto.Keccak256Hash(nil)
)
||||
|
||||
// NewState creates a StateDB rooted at the given header, backed by on-demand
// retrieval through the ODR backend. The state.New error is deliberately
// ignored; missing nodes are fetched lazily on access.
func NewState(ctx context.Context, head *types.Header, odr OdrBackend) *state.StateDB {
	state, _ := state.New(head.Root, NewStateDatabase(ctx, head, odr), nil)
	return state
}
||||
|
||||
// NewStateDatabase wraps the ODR backend as a state.Database anchored at the
// state root of the given header.
func NewStateDatabase(ctx context.Context, head *types.Header, odr OdrBackend) state.Database {
	return &odrDatabase{ctx, StateTrieID(head), odr}
}
||||
|
||||
// odrDatabase is a state.Database implementation that resolves missing trie
// nodes and contract code through an ODR backend.
type odrDatabase struct {
	ctx     context.Context // retrieval context for all ODR requests
	id      *TrieID         // identifies the state trie this database serves
	backend OdrBackend
}
||||
|
||||
// OpenTrie returns a lazily-resolved account trie; the root argument is
// ignored because the trie identity is fixed by db.id.
func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) {
	return &odrTrie{db: db, id: db.id}, nil
}
||||
|
||||
// OpenStorageTrie returns a lazily-resolved storage trie for the given account.
func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ state.Trie) (state.Trie, error) {
	return &odrTrie{db: db, id: StorageTrieID(db.id, address, root)}, nil
}
||||
|
||||
func (db *odrDatabase) CopyTrie(t state.Trie) state.Trie { |
||||
switch t := t.(type) { |
||||
case *odrTrie: |
||||
cpy := &odrTrie{db: t.db, id: t.id} |
||||
if t.trie != nil { |
||||
cpy.trie = t.trie.Copy() |
||||
} |
||||
return cpy |
||||
default: |
||||
panic(fmt.Errorf("unknown trie type %T", t)) |
||||
} |
||||
} |
||||
|
||||
// ContractCode returns the bytecode for the given code hash, serving from the
// local database when possible and falling back to an ODR retrieval.
func (db *odrDatabase) ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) {
	// Hash of empty input: the account has no code.
	if codeHash == sha3Nil {
		return nil, nil
	}
	code := rawdb.ReadCode(db.backend.Database(), codeHash)
	if len(code) != 0 {
		return code, nil
	}
	// Not cached locally: request it via ODR, scoped to the account address.
	id := *db.id
	id.AccountAddress = addr[:]
	req := &CodeRequest{Id: &id, Hash: codeHash}
	err := db.backend.Retrieve(db.ctx, req)
	return req.Data, err
}
||||
|
||||
func (db *odrDatabase) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { |
||||
code, err := db.ContractCode(addr, codeHash) |
||||
return len(code), err |
||||
} |
||||
|
||||
// TrieDB returns nil: the ODR database has no local trie database.
func (db *odrDatabase) TrieDB() *trie.Database {
	return nil
}
||||
|
||||
// DiskDB is not supported for ODR-backed state and always panics.
func (db *odrDatabase) DiskDB() ethdb.KeyValueStore {
	panic("not implemented")
}
||||
|
||||
// odrTrie is a state.Trie that materializes its backing trie lazily and
// fetches missing nodes over ODR as operations require them (see do).
type odrTrie struct {
	db   *odrDatabase
	id   *TrieID
	trie *trie.Trie // nil until first use; created on demand in do
}
||||
|
||||
// GetStorage reads a storage slot, retrying through ODR on missing nodes.
// It returns the RLP content of the stored value, or nil when the slot is
// empty.
func (t *odrTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) {
	// The trie is keyed by the hash of the raw slot key (secure trie scheme).
	key = crypto.Keccak256(key)
	var enc []byte
	err := t.do(key, func() (err error) {
		enc, err = t.trie.Get(key)
		return err
	})
	if err != nil || len(enc) == 0 {
		return nil, err
	}
	// Stored values are RLP-encoded; unwrap to the raw content.
	_, content, _, err := rlp.Split(enc)
	return content, err
}
||||
|
||||
func (t *odrTrie) GetAccount(address common.Address) (*types.StateAccount, error) { |
||||
var ( |
||||
enc []byte |
||||
key = crypto.Keccak256(address.Bytes()) |
||||
) |
||||
err := t.do(key, func() (err error) { |
||||
enc, err = t.trie.Get(key) |
||||
return err |
||||
}) |
||||
if err != nil || len(enc) == 0 { |
||||
return nil, err |
||||
} |
||||
acct := new(types.StateAccount) |
||||
if err := rlp.DecodeBytes(enc, acct); err != nil { |
||||
return nil, err |
||||
} |
||||
return acct, nil |
||||
} |
||||
|
||||
func (t *odrTrie) UpdateAccount(address common.Address, acc *types.StateAccount) error { |
||||
key := crypto.Keccak256(address.Bytes()) |
||||
value, err := rlp.EncodeToBytes(acc) |
||||
if err != nil { |
||||
return fmt.Errorf("decoding error in account update: %w", err) |
||||
} |
||||
return t.do(key, func() error { |
||||
return t.trie.Update(key, value) |
||||
}) |
||||
} |
||||
|
||||
// UpdateContractCode is a no-op: code is stored outside the trie in this
// implementation.
func (t *odrTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
	return nil
}
||||
|
||||
func (t *odrTrie) UpdateStorage(_ common.Address, key, value []byte) error { |
||||
key = crypto.Keccak256(key) |
||||
v, _ := rlp.EncodeToBytes(value) |
||||
return t.do(key, func() error { |
||||
return t.trie.Update(key, v) |
||||
}) |
||||
} |
||||
|
||||
func (t *odrTrie) DeleteStorage(_ common.Address, key []byte) error { |
||||
key = crypto.Keccak256(key) |
||||
return t.do(key, func() error { |
||||
return t.trie.Delete(key) |
||||
}) |
||||
} |
||||
|
||||
// DeleteAccount abstracts an account deletion from the trie.
|
||||
func (t *odrTrie) DeleteAccount(address common.Address) error { |
||||
key := crypto.Keccak256(address.Bytes()) |
||||
return t.do(key, func() error { |
||||
return t.trie.Delete(key) |
||||
}) |
||||
} |
||||
|
||||
func (t *odrTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { |
||||
if t.trie == nil { |
||||
return t.id.Root, nil, nil |
||||
} |
||||
return t.trie.Commit(collectLeaf) |
||||
} |
||||
|
||||
func (t *odrTrie) Hash() common.Hash { |
||||
if t.trie == nil { |
||||
return t.id.Root |
||||
} |
||||
return t.trie.Hash() |
||||
} |
||||
|
||||
func (t *odrTrie) NodeIterator(startkey []byte) (trie.NodeIterator, error) { |
||||
return newNodeIterator(t, startkey), nil |
||||
} |
||||
|
||||
// GetKey returns the preimage of the given hashed key. Preimage tracking is
// not supported by the ODR trie, so it always returns nil.
func (t *odrTrie) GetKey(sha []byte) []byte {
	return nil
}
||||
|
||||
// Prove is unsupported on the ODR trie and always returns an error.
func (t *odrTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error {
	return errors.New("not implemented, needs client/server interface split")
}
||||
|
||||
// do tries and retries to execute a function until it returns with no error or
// an error type other than MissingNodeError. Each MissingNodeError triggers an
// ODR retrieval for the affected key before the function is retried.
func (t *odrTrie) do(key []byte, fn func() error) error {
	for {
		var err error
		// Lazily open the underlying trie on the first call.
		if t.trie == nil {
			var id *trie.ID
			if len(t.id.AccountAddress) > 0 {
				// A non-empty account address identifies a storage trie.
				id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root)
			} else {
				id = trie.StateTrieID(t.id.StateRoot)
			}
			triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults)
			t.trie, err = trie.New(id, triedb)
		}
		if err == nil {
			err = fn()
		}
		// Any error other than a missing node (including nil) is final.
		if _, ok := err.(*trie.MissingNodeError); !ok {
			return err
		}
		// Fetch the missing node(s) for this key via ODR and retry.
		r := &TrieRequest{Id: t.id, Key: key}
		if err := t.db.backend.Retrieve(t.db.ctx, r); err != nil {
			return err
		}
	}
}
||||
|
||||
// nodeIterator wraps a trie.NodeIterator and transparently fetches missing
// trie nodes through the ODR backend while iterating.
type nodeIterator struct {
	trie.NodeIterator
	t   *odrTrie // trie being iterated
	err error    // sticky error recorded by do
}
||||
|
||||
// newNodeIterator creates an ODR-capable iterator over the given trie,
// positioned at startkey. Errors encountered during setup are recorded in
// the iterator's err field rather than returned.
func newNodeIterator(t *odrTrie, startkey []byte) trie.NodeIterator {
	it := &nodeIterator{t: t}
	// Open the actual non-ODR trie if that hasn't happened yet.
	if t.trie == nil {
		it.do(func() error {
			var id *trie.ID
			if len(t.id.AccountAddress) > 0 {
				// A non-empty account address identifies a storage trie.
				id = trie.StorageTrieID(t.id.StateRoot, crypto.Keccak256Hash(t.id.AccountAddress), t.id.Root)
			} else {
				id = trie.StateTrieID(t.id.StateRoot)
			}
			triedb := trie.NewDatabase(t.db.backend.Database(), trie.HashDefaults)
			// NOTE: this t deliberately shadows the outer *odrTrie.
			t, err := trie.New(id, triedb)
			if err == nil {
				it.t.trie = t
			}
			return err
		})
	}
	// Position the underlying iterator, retrying on missing nodes.
	it.do(func() error {
		var err error
		it.NodeIterator, err = it.t.trie.NodeIterator(startkey)
		if err != nil {
			return err
		}
		return it.NodeIterator.Error()
	})
	return it
}
||||
|
||||
func (it *nodeIterator) Next(descend bool) bool { |
||||
var ok bool |
||||
it.do(func() error { |
||||
ok = it.NodeIterator.Next(descend) |
||||
return it.NodeIterator.Error() |
||||
}) |
||||
return ok |
||||
} |
||||
|
||||
// do runs fn and attempts to fill in missing nodes by retrieving.
// It keeps retrying until fn succeeds or fails with something other than a
// MissingNodeError; the final error is recorded in it.err.
func (it *nodeIterator) do(fn func() error) {
	// Remember the last missing hash to detect a retrieval that makes no
	// progress (would otherwise loop forever).
	var lasthash common.Hash
	for {
		it.err = fn()
		missing, ok := it.err.(*trie.MissingNodeError)
		if !ok {
			return
		}
		if missing.NodeHash == lasthash {
			it.err = fmt.Errorf("retrieve loop for trie node %x", missing.NodeHash)
			return
		}
		lasthash = missing.NodeHash
		// Fetch the missing node via ODR and retry fn.
		r := &TrieRequest{Id: it.t.id, Key: nibblesToKey(missing.Path)}
		if it.err = it.t.db.backend.Retrieve(it.t.db.ctx, r); it.err != nil {
			return
		}
	}
}
||||
|
||||
func (it *nodeIterator) Error() error { |
||||
if it.err != nil { |
||||
return it.err |
||||
} |
||||
return it.NodeIterator.Error() |
||||
} |
||||
|
||||
// nibblesToKey packs a hex-nibble path (high nibble first, optionally ending
// in the 0x10 terminator) into a byte key. An odd nibble count is padded with
// a zero low nibble.
//
// Fix: the previous implementation padded odd-length input with
// append(nib, 0), which could write through into the caller's backing array
// (e.g. overwrite the terminator byte of missing.Path). This version never
// mutates the input slice.
func nibblesToKey(nib []byte) []byte {
	if len(nib) > 0 && nib[len(nib)-1] == 0x10 {
		nib = nib[:len(nib)-1] // drop terminator
	}
	key := make([]byte, (len(nib)+1)/2)
	for i, n := range nib {
		if i%2 == 0 {
			key[i/2] = n << 4 // high nibble
		} else {
			key[i/2] |= n // low nibble
		}
	}
	return key
}
@ -1,95 +0,0 @@ |
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"bytes" |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
"math/big" |
||||
"testing" |
||||
|
||||
"github.com/davecgh/go-spew/spew" |
||||
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/state" |
||||
"github.com/ethereum/go-ethereum/core/vm" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
) |
||||
|
||||
// TestNodeIterator builds a small full chain, then opens the same head state
// through the ODR-backed light trie and verifies that iterating it yields
// exactly the same key/value pairs as the full node's trie.
func TestNodeIterator(t *testing.T) {
	var (
		fulldb  = rawdb.NewMemoryDatabase()
		lightdb = rawdb.NewMemoryDatabase()
		gspec   = &core.Genesis{
			Config:  params.TestChainConfig,
			Alloc:   core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}},
			BaseFee: big.NewInt(params.InitialBaseFee),
		}
	)
	// Assemble a 4-block full chain to serve ODR requests from.
	blockchain, _ := core.NewBlockChain(fulldb, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil)
	_, gchain, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, testChainGen)
	if _, err := blockchain.InsertChain(gchain); err != nil {
		panic(err)
	}

	// The light database only holds the genesis; everything else is fetched
	// on demand through the test ODR backend.
	gspec.MustCommit(lightdb, trie.NewDatabase(lightdb, trie.HashDefaults))
	ctx := context.Background()
	odr := &testOdr{sdb: fulldb, ldb: lightdb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig}
	head := blockchain.CurrentHeader()
	lightTrie, _ := NewStateDatabase(ctx, head, odr).OpenTrie(head.Root)
	fullTrie, _ := blockchain.StateCache().OpenTrie(head.Root)
	if err := diffTries(fullTrie, lightTrie); err != nil {
		t.Fatal(err)
	}
}
||||
|
||||
func diffTries(t1, t2 state.Trie) error { |
||||
trieIt1, err := t1.NodeIterator(nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
trieIt2, err := t2.NodeIterator(nil) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
i1 := trie.NewIterator(trieIt1) |
||||
i2 := trie.NewIterator(trieIt2) |
||||
for i1.Next() && i2.Next() { |
||||
if !bytes.Equal(i1.Key, i2.Key) { |
||||
spew.Dump(i2) |
||||
return fmt.Errorf("tries have different keys %x, %x", i1.Key, i2.Key) |
||||
} |
||||
if !bytes.Equal(i1.Value, i2.Value) { |
||||
return fmt.Errorf("tries differ at key %x", i1.Key) |
||||
} |
||||
} |
||||
switch { |
||||
case i1.Err != nil: |
||||
return fmt.Errorf("full trie iterator error: %v", i1.Err) |
||||
case i2.Err != nil: |
||||
return fmt.Errorf("light trie iterator error: %v", i2.Err) |
||||
case i1.Next(): |
||||
return errors.New("full trie iterator has more k/v pairs") |
||||
case i2.Next(): |
||||
return errors.New("light trie iterator has more k/v pairs") |
||||
} |
||||
return nil |
||||
} |
@ -1,556 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"fmt" |
||||
"math/big" |
||||
"sync" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/state" |
||||
"github.com/ethereum/go-ethereum/core/txpool" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/ethdb" |
||||
"github.com/ethereum/go-ethereum/event" |
||||
"github.com/ethereum/go-ethereum/log" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
) |
||||
|
||||
const (
	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
	chainHeadChanSize = 10
)

// txPermanent is the number of mined blocks after a mined transaction is
// considered permanent and no rollback is expected.
// It is a var (not const) so tests can lower it.
var txPermanent = uint64(500)
||||
|
||||
// TxPool implements the transaction pool for light clients, which keeps track
// of the status of locally created transactions, detecting if they are included
// in a block (mined) or rolled back. There are no queued transactions since we
// always receive all locally signed transactions in the same order as they are
// created.
type TxPool struct {
	config       *params.ChainConfig
	signer       types.Signer // derives senders; latest signer for the config
	quit         chan bool    // closed by Stop
	txFeed       event.Feed   // announces newly pooled transactions
	scope        event.SubscriptionScope
	chainHeadCh  chan core.ChainHeadEvent
	chainHeadSub event.Subscription
	mu           sync.RWMutex // protects the maps and head below
	chain        *LightChain
	odr          OdrBackend
	chainDb      ethdb.Database
	relay        TxRelayBackend
	head         common.Hash                          // last processed head block hash
	nonce        map[common.Address]uint64            // "pending" nonce
	pending      map[common.Hash]*types.Transaction   // pending transactions by tx hash
	mined        map[common.Hash][]*types.Transaction // mined transactions by block hash
	clearIdx     uint64                               // earliest block nr that can contain mined tx info

	istanbul bool // Fork indicator whether we are in the istanbul stage.
	eip2718  bool // Fork indicator whether we are in the eip2718 stage.
	shanghai bool // Fork indicator whether we are in the shanghai stage.
}
||||
|
||||
// TxRelayBackend provides an interface to the mechanism that forwards
// transactions to the ETH network. The implementations of the functions
// should be non-blocking.
//
// Send instructs the backend to forward new transactions.
//
// NewHead notifies the backend about a new head after it has been processed
// by the tx pool, including mined and rolled back transactions since the
// last event.
//
// Discard notifies the backend about transactions that should be discarded
// either because they have been replaced by a re-send or because they have
// been mined long ago and no rollback is expected.
type TxRelayBackend interface {
	Send(txs types.Transactions)
	NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash)
	Discard(hashes []common.Hash)
}
||||
|
||||
// NewTxPool creates a new light transaction pool tracking the given chain and
// forwarding accepted transactions through relay. It starts a background
// event loop which is stopped via Stop.
func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool {
	pool := &TxPool{
		config:      config,
		signer:      types.LatestSigner(config),
		nonce:       make(map[common.Address]uint64),
		pending:     make(map[common.Hash]*types.Transaction),
		mined:       make(map[common.Hash][]*types.Transaction),
		quit:        make(chan bool),
		chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize),
		chain:       chain,
		relay:       relay,
		odr:         chain.Odr(),
		chainDb:     chain.Odr().Database(),
		head:        chain.CurrentHeader().Hash(),
		clearIdx:    chain.CurrentHeader().Number.Uint64(),
	}
	// Subscribe events from blockchain before the loop starts so no head
	// event is missed.
	pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh)
	go pool.eventLoop()

	return pool
}
||||
|
||||
// currentState returns the light state of the current head header
|
||||
func (pool *TxPool) currentState(ctx context.Context) *state.StateDB { |
||||
return NewState(ctx, pool.chain.CurrentHeader(), pool.odr) |
||||
} |
||||
|
||||
// GetNonce returns the "pending" nonce of a given address. It always queries
// the nonce belonging to the latest header too in order to detect if another
// client using the same key sent a transaction.
func (pool *TxPool) GetNonce(ctx context.Context, addr common.Address) (uint64, error) {
	state := pool.currentState(ctx)
	nonce := state.GetNonce(addr)
	if state.Error() != nil {
		return 0, state.Error()
	}
	// Prefer the locally tracked nonce when it is ahead of the chain state.
	sn, ok := pool.nonce[addr]
	if ok && sn > nonce {
		nonce = sn
	}
	// Sync the local record forward if the chain has overtaken it (e.g.
	// another client sent transactions with the same key).
	if !ok || sn < nonce {
		pool.nonce[addr] = nonce
	}
	return nonce, nil
}
||||
|
||||
// txStateChanges stores the recent changes between pending/mined states of
// transactions. True means mined, false means rolled back, no entry means no
// change.
type txStateChanges map[common.Hash]bool
||||
|
||||
// setState sets the status of a tx to either recently mined or recently rolled back
|
||||
func (txc txStateChanges) setState(txHash common.Hash, mined bool) { |
||||
val, ent := txc[txHash] |
||||
if ent && (val != mined) { |
||||
delete(txc, txHash) |
||||
} else { |
||||
txc[txHash] = mined |
||||
} |
||||
} |
||||
|
||||
// getLists creates lists of mined and rolled back tx hashes
|
||||
func (txc txStateChanges) getLists() (mined []common.Hash, rollback []common.Hash) { |
||||
for hash, val := range txc { |
||||
if val { |
||||
mined = append(mined, hash) |
||||
} else { |
||||
rollback = append(rollback, hash) |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
// checkMinedTxs checks newly added blocks for the currently pending transactions
// and marks them as mined if necessary. It also stores block position in the db
// and adds them to the received txStateChanges map.
func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number uint64, txc txStateChanges) error {
	// If no transactions are pending, we don't care about anything
	if len(pool.pending) == 0 {
		return nil
	}
	block, err := GetBlock(ctx, pool.odr, hash, number)
	if err != nil {
		return err
	}
	// Gather all the local transaction mined in this block
	list := pool.mined[hash]
	for _, tx := range block.Transactions() {
		if _, ok := pool.pending[tx.Hash()]; ok {
			list = append(list, tx)
		}
	}
	// If some transactions have been mined, write the needed data to disk and update
	if list != nil {
		// Retrieve all the receipts belonging to this block and write the lookup table
		if _, err := GetBlockReceipts(ctx, pool.odr, hash, number); err != nil { // ODR caches, ignore results
			return err
		}
		rawdb.WriteTxLookupEntriesByBlock(pool.chainDb, block)

		// Update the transaction pool's state: pending -> mined.
		for _, tx := range list {
			delete(pool.pending, tx.Hash())
			txc.setState(tx.Hash(), true)
		}
		pool.mined[hash] = list
	}
	return nil
}
||||
|
||||
// rollbackTxs marks the transactions contained in recently rolled back blocks
// as rolled back. It also removes any positional lookup entries.
func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) {
	batch := pool.chainDb.NewBatch()
	if list, ok := pool.mined[hash]; ok {
		for _, tx := range list {
			txHash := tx.Hash()
			rawdb.DeleteTxLookupEntry(batch, txHash)
			// Move the transaction back to the pending set.
			pool.pending[txHash] = tx
			txc.setState(txHash, false)
		}
		delete(pool.mined, hash)
	}
	batch.Write()
}
||||
|
||||
// reorgOnNewHead sets a new head header, processing (and rolling back if necessary)
// the blocks since the last known head and returns a txStateChanges map containing
// the recently mined and rolled back transaction hashes. If an error (context
// timeout) occurs during checking new blocks, it leaves the locally known head
// at the latest checked block and still returns a valid txStateChanges, making it
// possible to continue checking the missing blocks at the next chain head event
func (pool *TxPool) reorgOnNewHead(ctx context.Context, newHeader *types.Header) (txStateChanges, error) {
	txc := make(txStateChanges)
	oldh := pool.chain.GetHeaderByHash(pool.head)
	newh := newHeader
	// find common ancestor, create list of rolled back and new block hashes
	var oldHashes, newHashes []common.Hash
	for oldh.Hash() != newh.Hash() {
		// Walk the longer branch down until both sides are at the same height.
		if oldh.Number.Uint64() >= newh.Number.Uint64() {
			oldHashes = append(oldHashes, oldh.Hash())
			oldh = pool.chain.GetHeader(oldh.ParentHash, oldh.Number.Uint64()-1)
		}
		if oldh.Number.Uint64() < newh.Number.Uint64() {
			newHashes = append(newHashes, newh.Hash())
			newh = pool.chain.GetHeader(newh.ParentHash, newh.Number.Uint64()-1)
			if newh == nil {
				// happens when CHT syncing, nothing to do
				newh = oldh
			}
		}
	}
	if oldh.Number.Uint64() < pool.clearIdx {
		pool.clearIdx = oldh.Number.Uint64()
	}
	// roll back old blocks
	for _, hash := range oldHashes {
		pool.rollbackTxs(hash, txc)
	}
	pool.head = oldh.Hash()
	// check mined txs of new blocks (array is in reversed order)
	for i := len(newHashes) - 1; i >= 0; i-- {
		hash := newHashes[i]
		if err := pool.checkMinedTxs(ctx, hash, newHeader.Number.Uint64()-uint64(i), txc); err != nil {
			// Head stays at the last successfully checked block so the next
			// chain head event can resume from there.
			return txc, err
		}
		pool.head = hash
	}

	// clear old mined tx entries of old blocks
	if idx := newHeader.Number.Uint64(); idx > pool.clearIdx+txPermanent {
		idx2 := idx - txPermanent
		if len(pool.mined) > 0 {
			for i := pool.clearIdx; i < idx2; i++ {
				hash := rawdb.ReadCanonicalHash(pool.chainDb, i)
				if list, ok := pool.mined[hash]; ok {
					hashes := make([]common.Hash, len(list))
					for i, tx := range list {
						hashes[i] = tx.Hash()
					}
					pool.relay.Discard(hashes)
					delete(pool.mined, hash)
				}
			}
		}
		pool.clearIdx = idx2
	}

	return txc, nil
}
||||
|
||||
// blockCheckTimeout is the time limit for checking new blocks for mined
// transactions. Checking resumes at the next chain head event if timed out.
const blockCheckTimeout = time.Second * 3
||||
|
||||
// eventLoop processes chain head events and also notifies the tx relay backend
// about the new head hash and tx state changes. It runs until the chain head
// subscription is torn down (see Stop).
func (pool *TxPool) eventLoop() {
	for {
		select {
		case ev := <-pool.chainHeadCh:
			pool.setNewHead(ev.Block.Header())
			// hack in order to avoid hogging the lock; this part will
			// be replaced by a subsequent PR.
			time.Sleep(time.Millisecond)

		// System stopped
		case <-pool.chainHeadSub.Err():
			return
		}
	}
}
||||
|
||||
// setNewHead processes a new chain head under the pool lock: it reorganizes
// pending/mined state, notifies the relay backend, and refreshes the fork
// indicator flags for the next block.
func (pool *TxPool) setNewHead(head *types.Header) {
	pool.mu.Lock()
	defer pool.mu.Unlock()

	// Bound the block-checking work; a timeout is tolerated and resumed on
	// the next head event (see reorgOnNewHead).
	ctx, cancel := context.WithTimeout(context.Background(), blockCheckTimeout)
	defer cancel()

	txc, _ := pool.reorgOnNewHead(ctx, head)
	m, r := txc.getLists()
	pool.relay.NewHead(pool.head, m, r)

	// Update fork indicator by next pending block number
	next := new(big.Int).Add(head.Number, big.NewInt(1))
	pool.istanbul = pool.config.IsIstanbul(next)
	pool.eip2718 = pool.config.IsBerlin(next)
	pool.shanghai = pool.config.IsShanghai(next, uint64(time.Now().Unix()))
}
||||
|
||||
// Stop stops the light transaction pool
|
||||
func (pool *TxPool) Stop() { |
||||
// Unsubscribe all subscriptions registered from txpool
|
||||
pool.scope.Close() |
||||
// Unsubscribe subscriptions registered from blockchain
|
||||
pool.chainHeadSub.Unsubscribe() |
||||
close(pool.quit) |
||||
log.Info("Transaction pool stopped") |
||||
} |
||||
|
||||
// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and
|
||||
// starts sending event to the given channel.
|
||||
func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { |
||||
return pool.scope.Track(pool.txFeed.Subscribe(ch)) |
||||
} |
||||
|
||||
// Stats returns the number of currently pending (locally created) transactions
|
||||
func (pool *TxPool) Stats() (pending int) { |
||||
pool.mu.RLock() |
||||
defer pool.mu.RUnlock() |
||||
|
||||
pending = len(pool.pending) |
||||
return |
||||
} |
||||
|
||||
// validateTx checks whether a transaction is valid according to the consensus
// rules: recoverable sender, non-stale nonce, block gas limit, non-negative
// value, sufficient balance and sufficient intrinsic gas.
func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error {
	// Validate sender
	var (
		from common.Address
		err  error
	)

	// Validate the transaction sender and it's sig. Throw
	// if the from fields is invalid.
	if from, err = types.Sender(pool.signer, tx); err != nil {
		return txpool.ErrInvalidSender
	}
	// Last but not least check for nonce errors
	currentState := pool.currentState(ctx)
	if n := currentState.GetNonce(from); n > tx.Nonce() {
		return core.ErrNonceTooLow
	}

	// Check the transaction doesn't exceed the current
	// block limit gas.
	header := pool.chain.GetHeaderByHash(pool.head)
	if header.GasLimit < tx.Gas() {
		return txpool.ErrGasLimit
	}

	// Transactions can't be negative. This may never happen
	// using RLP decoded transactions but may occur if you create
	// a transaction using the RPC for example.
	if tx.Value().Sign() < 0 {
		return txpool.ErrNegativeValue
	}

	// Transactor should have enough funds to cover the costs
	// cost == V + GP * GL
	if b := currentState.GetBalance(from); b.Cmp(tx.Cost()) < 0 {
		return core.ErrInsufficientFunds
	}

	// Should supply enough intrinsic gas
	gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.shanghai)
	if err != nil {
		return err
	}
	if tx.Gas() < gas {
		return core.ErrIntrinsicGas
	}
	// Surface any ODR retrieval error encountered while reading state.
	return currentState.Error()
}
||||
|
||||
// add validates a new transaction and sets its state pending if processable.
// It also updates the locally stored nonce if necessary.
// Callers must hold pool.mu.
func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error {
	hash := tx.Hash()

	if pool.pending[hash] != nil {
		return fmt.Errorf("known transaction (%x)", hash[:4])
	}
	err := pool.validateTx(ctx, tx)
	if err != nil {
		return err
	}

	// NOTE(review): this re-check is always true — the hash was verified
	// absent above and nothing in between touches pool.pending.
	if _, ok := pool.pending[hash]; !ok {
		pool.pending[hash] = tx

		nonce := tx.Nonce() + 1

		// Advance the locally tracked pending nonce for the sender.
		addr, _ := types.Sender(pool.signer, tx)
		if nonce > pool.nonce[addr] {
			pool.nonce[addr] = nonce
		}

		// Notify the subscribers. This event is posted in a goroutine
		// because it's possible that somewhere during the post "Remove transaction"
		// gets called which will then wait for the global tx pool lock and deadlock.
		go pool.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}})
	}

	// Print a log message if low enough level is set
	log.Debug("Pooled new transaction", "hash", hash, "from", log.Lazy{Fn: func() common.Address { from, _ := types.Sender(pool.signer, tx); return from }}, "to", tx.To())
	return nil
}
||||
|
||||
// Add adds a transaction to the pool if valid and passes it to the tx relay
// backend. The raw transaction is also persisted to the chain database.
func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error {
	pool.mu.Lock()
	defer pool.mu.Unlock()
	data, err := tx.MarshalBinary()
	if err != nil {
		return err
	}

	if err := pool.add(ctx, tx); err != nil {
		return err
	}
	//fmt.Println("Send", tx.Hash())
	pool.relay.Send(types.Transactions{tx})

	// NOTE(review): the Put error is silently dropped; the tx has already
	// been accepted and relayed at this point.
	pool.chainDb.Put(tx.Hash().Bytes(), data)
	return nil
}
||||
|
||||
// AddBatch adds all valid transactions to the pool and passes them to
|
||||
// the tx relay backend
|
||||
func (pool *TxPool) AddBatch(ctx context.Context, txs []*types.Transaction) { |
||||
pool.mu.Lock() |
||||
defer pool.mu.Unlock() |
||||
var sendTx types.Transactions |
||||
|
||||
for _, tx := range txs { |
||||
if err := pool.add(ctx, tx); err == nil { |
||||
sendTx = append(sendTx, tx) |
||||
} |
||||
} |
||||
if len(sendTx) > 0 { |
||||
pool.relay.Send(sendTx) |
||||
} |
||||
} |
||||
|
||||
// GetTransaction returns a transaction if it is contained in the pool
|
||||
// and nil otherwise.
|
||||
func (pool *TxPool) GetTransaction(hash common.Hash) *types.Transaction { |
||||
// check the txs first
|
||||
if tx, ok := pool.pending[hash]; ok { |
||||
return tx |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// GetTransactions returns all currently processable transactions.
|
||||
// The returned slice may be modified by the caller.
|
||||
func (pool *TxPool) GetTransactions() (txs types.Transactions, err error) { |
||||
pool.mu.RLock() |
||||
defer pool.mu.RUnlock() |
||||
|
||||
txs = make(types.Transactions, len(pool.pending)) |
||||
i := 0 |
||||
for _, tx := range pool.pending { |
||||
txs[i] = tx |
||||
i++ |
||||
} |
||||
return txs, nil |
||||
} |
||||
|
||||
// Content retrieves the data content of the transaction pool, returning all the
|
||||
// pending as well as queued transactions, grouped by account and nonce.
|
||||
func (pool *TxPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { |
||||
pool.mu.RLock() |
||||
defer pool.mu.RUnlock() |
||||
|
||||
// Retrieve all the pending transactions and sort by account and by nonce
|
||||
pending := make(map[common.Address][]*types.Transaction) |
||||
for _, tx := range pool.pending { |
||||
account, _ := types.Sender(pool.signer, tx) |
||||
pending[account] = append(pending[account], tx) |
||||
} |
||||
// There are no queued transactions in a light pool, just return an empty map
|
||||
queued := make(map[common.Address][]*types.Transaction) |
||||
return pending, queued |
||||
} |
||||
|
||||
// ContentFrom retrieves the data content of the transaction pool, returning the
|
||||
// pending as well as queued transactions of this address, grouped by nonce.
|
||||
func (pool *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { |
||||
pool.mu.RLock() |
||||
defer pool.mu.RUnlock() |
||||
|
||||
// Retrieve the pending transactions and sort by nonce
|
||||
var pending []*types.Transaction |
||||
for _, tx := range pool.pending { |
||||
account, _ := types.Sender(pool.signer, tx) |
||||
if account != addr { |
||||
continue |
||||
} |
||||
pending = append(pending, tx) |
||||
} |
||||
// There are no queued transactions in a light pool, just return an empty map
|
||||
return pending, []*types.Transaction{} |
||||
} |
||||
|
||||
// RemoveTransactions removes all given transactions from the pool.
|
||||
func (pool *TxPool) RemoveTransactions(txs types.Transactions) { |
||||
pool.mu.Lock() |
||||
defer pool.mu.Unlock() |
||||
|
||||
var hashes []common.Hash |
||||
batch := pool.chainDb.NewBatch() |
||||
for _, tx := range txs { |
||||
hash := tx.Hash() |
||||
delete(pool.pending, hash) |
||||
batch.Delete(hash.Bytes()) |
||||
hashes = append(hashes, hash) |
||||
} |
||||
batch.Write() |
||||
pool.relay.Discard(hashes) |
||||
} |
||||
|
||||
// RemoveTx removes the transaction with the given hash from the pool.
|
||||
func (pool *TxPool) RemoveTx(hash common.Hash) { |
||||
pool.mu.Lock() |
||||
defer pool.mu.Unlock() |
||||
// delete from pending pool
|
||||
delete(pool.pending, hash) |
||||
pool.chainDb.Delete(hash[:]) |
||||
pool.relay.Discard([]common.Hash{hash}) |
||||
} |
@ -1,147 +0,0 @@ |
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package light |
||||
|
||||
import ( |
||||
"context" |
||||
"math" |
||||
"math/big" |
||||
"testing" |
||||
"time" |
||||
|
||||
"github.com/ethereum/go-ethereum/common" |
||||
"github.com/ethereum/go-ethereum/consensus/ethash" |
||||
"github.com/ethereum/go-ethereum/core" |
||||
"github.com/ethereum/go-ethereum/core/rawdb" |
||||
"github.com/ethereum/go-ethereum/core/types" |
||||
"github.com/ethereum/go-ethereum/core/vm" |
||||
"github.com/ethereum/go-ethereum/params" |
||||
"github.com/ethereum/go-ethereum/trie" |
||||
) |
||||
|
||||
// testTxRelay is a TxRelayBackend stub that reports the size of every relay
// callback on a channel so tests can synchronize on pool activity.
type testTxRelay struct {
	send, discard, mined chan int
}
||||
|
||||
// Send reports the number of transactions forwarded to the network.
func (r *testTxRelay) Send(txs types.Transactions) {
	r.send <- len(txs)
}
||||
|
||||
func (r *testTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) { |
||||
m := len(mined) |
||||
if m != 0 { |
||||
r.mined <- m |
||||
} |
||||
} |
||||
|
||||
// Discard reports the number of transactions dropped by the pool.
func (r *testTxRelay) Discard(hashes []common.Hash) {
	r.discard <- len(hashes)
}
||||
|
||||
// Dimensions of the tx pool test fixture: total transactions and chain length.
const poolTestTxs = 1000
const poolTestBlocks = 100

// test tx 0..n-1
var testTx [poolTestTxs]*types.Transaction
|
||||
// txs sent before block i
|
||||
func sentTx(i int) int { |
||||
return int(math.Pow(float64(i)/float64(poolTestBlocks), 0.9) * poolTestTxs) |
||||
} |
||||
|
||||
// txs included in block i or before that (minedTx(i) <= sentTx(i))
|
||||
func minedTx(i int) int { |
||||
return int(math.Pow(float64(i)/float64(poolTestBlocks), 1.1) * poolTestTxs) |
||||
} |
||||
|
||||
func txPoolTestChainGen(i int, block *core.BlockGen) { |
||||
s := minedTx(i) |
||||
e := minedTx(i + 1) |
||||
for i := s; i < e; i++ { |
||||
block.AddTx(testTx[i]) |
||||
} |
||||
} |
||||
|
||||
func TestTxPool(t *testing.T) { |
||||
for i := range testTx { |
||||
testTx[i], _ = types.SignTx(types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) |
||||
} |
||||
|
||||
var ( |
||||
sdb = rawdb.NewMemoryDatabase() |
||||
ldb = rawdb.NewMemoryDatabase() |
||||
gspec = &core.Genesis{ |
||||
Config: params.TestChainConfig, |
||||
Alloc: core.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, |
||||
BaseFee: big.NewInt(params.InitialBaseFee), |
||||
} |
||||
) |
||||
// Assemble the test environment
|
||||
blockchain, _ := core.NewBlockChain(sdb, nil, gspec, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) |
||||
_, gchain, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), poolTestBlocks, txPoolTestChainGen) |
||||
if _, err := blockchain.InsertChain(gchain); err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
gspec.MustCommit(ldb, trie.NewDatabase(ldb, trie.HashDefaults)) |
||||
odr := &testOdr{sdb: sdb, ldb: ldb, serverState: blockchain.StateCache(), indexerConfig: TestClientIndexerConfig} |
||||
relay := &testTxRelay{ |
||||
send: make(chan int, 1), |
||||
discard: make(chan int, 1), |
||||
mined: make(chan int, 1), |
||||
} |
||||
lightchain, _ := NewLightChain(odr, params.TestChainConfig, ethash.NewFullFaker()) |
||||
txPermanent = 50 |
||||
pool := NewTxPool(params.TestChainConfig, lightchain, relay) |
||||
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) |
||||
defer cancel() |
||||
|
||||
for ii, block := range gchain { |
||||
i := ii + 1 |
||||
s := sentTx(i - 1) |
||||
e := sentTx(i) |
||||
for i := s; i < e; i++ { |
||||
pool.Add(ctx, testTx[i]) |
||||
got := <-relay.send |
||||
exp := 1 |
||||
if got != exp { |
||||
t.Errorf("relay.Send expected len = %d, got %d", exp, got) |
||||
} |
||||
} |
||||
|
||||
if _, err := lightchain.InsertHeaderChain([]*types.Header{block.Header()}); err != nil { |
||||
panic(err) |
||||
} |
||||
|
||||
got := <-relay.mined |
||||
exp := minedTx(i) - minedTx(i-1) |
||||
if got != exp { |
||||
t.Errorf("relay.NewHead expected len(mined) = %d, got %d", exp, got) |
||||
} |
||||
|
||||
exp = 0 |
||||
if i > int(txPermanent)+1 { |
||||
exp = minedTx(i-int(txPermanent)-1) - minedTx(i-int(txPermanent)-2) |
||||
} |
||||
if exp != 0 { |
||||
got = <-relay.discard |
||||
if got != exp { |
||||
t.Errorf("relay.Discard expected len = %d, got %d", exp, got) |
||||
} |
||||
} |
||||
} |
||||
} |