Merge pull request #14631 from zsfelfoldi/bloombits2

core/bloombits, eth/filter: transformed bloom bitmap based log search
Péter Szilágyi committed by GitHub
commit c4d21bc8e5
  1. core/blockchain.go (15 lines changed)
  2. core/bloombits/doc.go (18 lines changed)
  3. core/bloombits/generator.go (87 lines changed)
  4. core/bloombits/generator_test.go (60 lines changed)
  5. core/bloombits/matcher.go (615 lines changed)
  6. core/bloombits/matcher_test.go (242 lines changed)
  7. core/bloombits/scheduler.go (181 lines changed)
  8. core/bloombits/scheduler_test.go (105 lines changed)
  9. core/chain_indexer.go (45 lines changed)
  10. core/chain_indexer_test.go (4 lines changed)
  11. core/database_util.go (183 lines changed)
  12. core/database_util_test.go (108 lines changed)
  13. core/types/bloom9.go (14 lines changed)
  14. eth/api_backend.go (12 lines changed)
  15. eth/backend.go (19 lines changed)
  16. eth/backend_test.go (74 lines changed)
  17. eth/bloombits.go (142 lines changed)
  18. eth/db_upgrade.go (44 lines changed)
  19. eth/filters/api.go (47 lines changed)
  20. eth/filters/bench_test.go (201 lines changed)
  21. eth/filters/filter.go (247 lines changed)
  22. eth/filters/filter_system_test.go (46 lines changed)
  23. eth/filters/filter_test.go (84 lines changed)
  24. les/api_backend.go (8 lines changed)
  25. miner/worker.go (2 lines changed)
  26. params/network_params.go (26 lines changed)

@@ -759,12 +759,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
log.Crit("Failed to write block receipts", "err", err)
return
}
-if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-errs[index] = fmt.Errorf("failed to write log blooms: %v", err)
-atomic.AddInt32(&failed, 1)
-log.Crit("Failed to write log blooms", "err", err)
-return
-}
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
errs[index] = fmt.Errorf("failed to write lookup metadata: %v", err)
atomic.AddInt32(&failed, 1)
@@ -1017,10 +1011,6 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
return i, err
}
-// Write map map bloom filters
-if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-return i, err
-}
// Write hash preimages
if err := WritePreimages(bc.chainDb, block.NumberU64(), state.Preimages()); err != nil {
return i, err
@@ -1178,11 +1168,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if err := WriteTxLookupEntries(bc.chainDb, block); err != nil {
return err
}
-// Write map map bloom filters
-receipts := GetBlockReceipts(bc.chainDb, block.Hash(), block.NumberU64())
-if err := WriteMipmapBloom(bc.chainDb, block.NumberU64(), receipts); err != nil {
-return err
-}
addedTxs = append(addedTxs, block.Transactions()...)
}

@@ -0,0 +1,18 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bloombits implements bloom filtering on batches of data.
package bloombits

@@ -0,0 +1,87 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"errors"
"github.com/ethereum/go-ethereum/core/types"
)
// errSectionOutOfBounds is returned if the user tried to add more bloom filters
// to the batch than available space, or if it tries to retrieve above the capacity.
var errSectionOutOfBounds = errors.New("section out of bounds")
// Generator takes a number of bloom filters and generates the rotated bloom bits
// to be used for batched filtering.
type Generator struct {
blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
sections uint // Number of sections to batch together
nextBit uint // Next bit to set when adding a bloom
}
// NewGenerator creates a rotated bloom generator that can iteratively fill a
// batched bloom filter's bits.
func NewGenerator(sections uint) (*Generator, error) {
if sections%8 != 0 {
return nil, errors.New("section count not multiple of 8")
}
b := &Generator{sections: sections}
for i := 0; i < types.BloomBitLength; i++ {
b.blooms[i] = make([]byte, sections/8)
}
return b, nil
}
// AddBloom takes a single bloom filter and sets the corresponding bit column
// in memory accordingly.
func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
// Make sure we're not adding more bloom filters than our capacity
if b.nextBit >= b.sections {
return errSectionOutOfBounds
}
if b.nextBit != index {
return errors.New("bloom filter with unexpected index")
}
// Rotate the bloom and insert into our collection
byteIndex := b.nextBit / 8
bitMask := byte(1) << byte(7-b.nextBit%8)
for i := 0; i < types.BloomBitLength; i++ {
bloomByteIndex := types.BloomByteLength - 1 - i/8
bloomBitMask := byte(1) << byte(i%8)
if (bloom[bloomByteIndex] & bloomBitMask) != 0 {
b.blooms[i][byteIndex] |= bitMask
}
}
b.nextBit++
return nil
}
// Bitset returns the bit vector belonging to the given bit index after all
// blooms have been added.
func (b *Generator) Bitset(idx uint) ([]byte, error) {
if b.nextBit != b.sections {
return nil, errors.New("bloom not fully generated yet")
}
if idx >= b.sections {
return nil, errSectionOutOfBounds
}
return b.blooms[idx], nil
}
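As a quick orientation for the generator above, here is a minimal usage sketch (not part of this diff; the tiny section count of 8 and the empty blooms are assumptions purely for illustration):
package main
import (
"fmt"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
)
func main() {
// Rotate 8 bloom filters (empty here, illustration only) into 2048 per-bit
// vectors of one byte each.
gen, err := bloombits.NewGenerator(8)
if err != nil {
panic(err)
}
for i := uint(0); i < 8; i++ {
var bloom types.Bloom
if err := gen.AddBloom(i, bloom); err != nil {
panic(err)
}
}
// Once every bloom of the section has been added, each of the 2048 bloom
// bits has its own bit vector spanning the whole section.
bits, err := gen.Bitset(0)
if err != nil {
panic(err)
}
fmt.Printf("bit 0 vector: %x\n", bits)
}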

@@ -0,0 +1,60 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"bytes"
"math/rand"
"testing"
"github.com/ethereum/go-ethereum/core/types"
)
// Tests that batched bloom bits are correctly rotated from the input bloom
// filters.
func TestGenerator(t *testing.T) {
// Generate the input and the rotated output
var input, output [types.BloomBitLength][types.BloomByteLength]byte
for i := 0; i < types.BloomBitLength; i++ {
for j := 0; j < types.BloomBitLength; j++ {
bit := byte(rand.Int() % 2)
input[i][j/8] |= bit << byte(7-j%8)
output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8)
}
}
// Crunch the input through the generator and verify the result
gen, err := NewGenerator(types.BloomBitLength)
if err != nil {
t.Fatalf("failed to create bloombit generator: %v", err)
}
for i, bloom := range input {
if err := gen.AddBloom(uint(i), bloom); err != nil {
t.Fatalf("bloom %d: failed to add: %v", i, err)
}
}
for i, want := range output {
have, err := gen.Bitset(uint(i))
if err != nil {
t.Fatalf("output %d: failed to retrieve bits: %v", i, err)
}
if !bytes.Equal(have, want[:]) {
t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want)
}
}
}

@@ -0,0 +1,615 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"bytes"
"errors"
"math"
"sort"
"sync"
"sync/atomic"
"time"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/crypto"
)
// bloomIndexes represents the bit indexes inside the bloom filter that belong
// to some key.
type bloomIndexes [3]uint
// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
func calcBloomIndexes(b []byte) bloomIndexes {
b = crypto.Keccak256(b)
var idxs bloomIndexes
for i := 0; i < len(idxs); i++ {
idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
}
return idxs
}
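// Illustrative note (not part of the original source): each of the three indexes
// is formed from a pair of Keccak-256 hash bytes and masked to 11 bits, so it
// always falls in [0, 2047]. For a hypothetical key whose hash starts with the
// bytes 0x12 0x34 0xab 0xcd 0x56 0x78, the derived indexes would be 564, 973 and 1656.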
// partialMatches with a non-nil vector represents a section in which some sub-
// matchers have already found potential matches. Subsequent sub-matchers will
// binary AND their matches with this vector. If vector is nil, it represents a
// section to be processed by the first sub-matcher.
type partialMatches struct {
section uint64
bitset []byte
}
// Retrieval represents a request for retrieval task assignments for a given
// bit with the given number of fetch elements, or a response for such a request.
// It can also have the actual results set to be used as a delivery data struct.
type Retrieval struct {
Bit uint
Sections []uint64
Bitsets [][]byte
}
// Matcher is a pipelined system of schedulers and logic matchers which perform
// binary AND/OR operations on the bit-streams, creating a stream of potential
// blocks to inspect for data content.
type Matcher struct {
sectionSize uint64 // Size of the data batches to filter on
filters [][]bloomIndexes // Filter the system is matching for
schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
retrievers chan chan uint // Retriever processes waiting for bit allocations
counters chan chan uint // Retriever processes waiting for task count reports
retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
running uint32 // Atomic flag whether a session is live or not
}
// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
// address and topic filtering on them.
func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
// Create the matcher instance
m := &Matcher{
sectionSize: sectionSize,
schedulers: make(map[uint]*scheduler),
retrievers: make(chan chan uint),
counters: make(chan chan uint),
retrievals: make(chan chan *Retrieval),
deliveries: make(chan *Retrieval),
}
// Calculate the bloom bit indexes for the groups we're interested in
m.filters = nil
for _, filter := range filters {
bloomBits := make([]bloomIndexes, len(filter))
for i, clause := range filter {
bloomBits[i] = calcBloomIndexes(clause)
}
m.filters = append(m.filters, bloomBits)
}
// For every bit, create a scheduler to load/download the bit vectors
for _, bloomIndexLists := range m.filters {
for _, bloomIndexList := range bloomIndexLists {
for _, bloomIndex := range bloomIndexList {
m.addScheduler(bloomIndex)
}
}
}
return m
}
// addScheduler adds a bit stream retrieval scheduler for the given bit index if
// one does not exist yet. If the bit is already selected for filtering, the
// existing scheduler can be used.
func (m *Matcher) addScheduler(idx uint) {
if _, ok := m.schedulers[idx]; ok {
return
}
m.schedulers[idx] = newScheduler(idx)
}
// Start starts the matching process and returns a stream of bloom matches in
// a given range of blocks. If there are no more matches in the range, the result
// channel is closed.
func (m *Matcher) Start(begin, end uint64, results chan uint64) (*MatcherSession, error) {
// Make sure we're not creating concurrent sessions
if atomic.SwapUint32(&m.running, 1) == 1 {
return nil, errors.New("matcher already running")
}
defer atomic.StoreUint32(&m.running, 0)
// Initiate a new matching round
session := &MatcherSession{
matcher: m,
quit: make(chan struct{}),
kill: make(chan struct{}),
}
for _, scheduler := range m.schedulers {
scheduler.reset()
}
sink := m.run(begin, end, cap(results), session)
// Read the output from the result sink and deliver to the user
session.pend.Add(1)
go func() {
defer session.pend.Done()
defer close(results)
for {
select {
case <-session.quit:
return
case res, ok := <-sink:
// New match result found
if !ok {
return
}
// Calculate the first and last blocks of the section
sectionStart := res.section * m.sectionSize
first := sectionStart
if begin > first {
first = begin
}
last := sectionStart + m.sectionSize - 1
if end < last {
last = end
}
// Iterate over all the blocks in the section and return the matching ones
for i := first; i <= last; i++ {
// Skip the entire byte if no matches are found inside
next := res.bitset[(i-sectionStart)/8]
if next == 0 {
i += 7
continue
}
// Some bit is set, do the actual submatching
if bit := 7 - i%8; next&(1<<bit) != 0 {
select {
case <-session.quit:
return
case results <- i:
}
}
}
}
}
}()
return session, nil
}
// run creates a daisy-chain of sub-matchers, one for the address set and one
// for each topic set, each sub-matcher receiving a section only if the previous
// ones have all found a potential match in one of the blocks of the section,
// then binary AND-ing its own matches and forwarding the result to the next one.
//
// The method starts feeding the section indexes into the first sub-matcher on a
// new goroutine and returns a sink channel receiving the results.
func (m *Matcher) run(begin, end uint64, buffer int, session *MatcherSession) chan *partialMatches {
// Create the source channel and feed section indexes into
source := make(chan *partialMatches, buffer)
session.pend.Add(1)
go func() {
defer session.pend.Done()
defer close(source)
for i := begin / m.sectionSize; i <= end/m.sectionSize; i++ {
select {
case <-session.quit:
return
case source <- &partialMatches{i, bytes.Repeat([]byte{0xff}, int(m.sectionSize/8))}:
}
}
}()
// Assemble the daisy-chained filtering pipeline
next := source
dist := make(chan *request, buffer)
for _, bloom := range m.filters {
next = m.subMatch(next, dist, bloom, session)
}
// Start the request distribution
session.pend.Add(1)
go m.distributor(dist, session)
return next
}
// subMatch creates a sub-matcher that filters for a set of addresses or topics, binary OR-s those matches, then
// binary AND-s the result to the daisy-chain input (source) and forwards it to the daisy-chain output.
// The matches of each address/topic are calculated by fetching the given sections of the three bloom bit indexes belonging to
// that address/topic, and binary AND-ing those vectors together.
func (m *Matcher) subMatch(source chan *partialMatches, dist chan *request, bloom []bloomIndexes, session *MatcherSession) chan *partialMatches {
// Start the concurrent schedulers for each bit required by the bloom filter
sectionSources := make([][3]chan uint64, len(bloom))
sectionSinks := make([][3]chan []byte, len(bloom))
for i, bits := range bloom {
for j, bit := range bits {
sectionSources[i][j] = make(chan uint64, cap(source))
sectionSinks[i][j] = make(chan []byte, cap(source))
m.schedulers[bit].run(sectionSources[i][j], dist, sectionSinks[i][j], session.quit, &session.pend)
}
}
process := make(chan *partialMatches, cap(source)) // entries from source are forwarded here after fetches have been initiated
results := make(chan *partialMatches, cap(source))
session.pend.Add(2)
go func() {
// Tear down the goroutine and terminate all source channels
defer session.pend.Done()
defer close(process)
defer func() {
for _, bloomSources := range sectionSources {
for _, bitSource := range bloomSources {
close(bitSource)
}
}
}()
// Read sections from the source channel and multiplex into all bit-schedulers
for {
select {
case <-session.quit:
return
case subres, ok := <-source:
// New subresult from previous link
if !ok {
return
}
// Multiplex the section index to all bit-schedulers
for _, bloomSources := range sectionSources {
for _, bitSource := range bloomSources {
select {
case <-session.quit:
return
case bitSource <- subres.section:
}
}
}
// Notify the processor that this section will become available
select {
case <-session.quit:
return
case process <- subres:
}
}
}
}()
go func() {
// Tear down the goroutine and terminate the final sink channel
defer session.pend.Done()
defer close(results)
// Read the source notifications and collect the delivered results
for {
select {
case <-session.quit:
return
case subres, ok := <-process:
// Notified of a section being retrieved
if !ok {
return
}
// Gather all the sub-results and merge them together
var orVector []byte
for _, bloomSinks := range sectionSinks {
var andVector []byte
for _, bitSink := range bloomSinks {
var data []byte
select {
case <-session.quit:
return
case data = <-bitSink:
}
if andVector == nil {
andVector = make([]byte, int(m.sectionSize/8))
copy(andVector, data)
} else {
bitutil.ANDBytes(andVector, andVector, data)
}
}
if orVector == nil {
orVector = andVector
} else {
bitutil.ORBytes(orVector, orVector, andVector)
}
}
if orVector == nil {
orVector = make([]byte, int(m.sectionSize/8))
}
if subres.bitset != nil {
bitutil.ANDBytes(orVector, orVector, subres.bitset)
}
if bitutil.TestBytes(orVector) {
select {
case <-session.quit:
return
case results <- &partialMatches{subres.section, orVector}:
}
}
}
}
}()
return results
}
// distributor receives requests from the schedulers and queues them into a set
// of pending requests, which are assigned to retrievers wanting to fulfil them.
func (m *Matcher) distributor(dist chan *request, session *MatcherSession) {
defer session.pend.Done()
var (
requests = make(map[uint][]uint64) // Per-bit list of section requests, ordered by section number
unallocs = make(map[uint]struct{}) // Bits with pending requests but not allocated to any retriever
retrievers chan chan uint // Waiting retrievers (toggled to nil if unallocs is empty)
)
var (
allocs int // Number of active allocations to handle graceful shutdown requests
shutdown = session.quit // Shutdown request channel, will gracefully wait for pending requests
)
// assign is a helper method to try to assign a pending bit to an actively
// listening servicer, or schedule it for later when one arrives.
assign := func(bit uint) {
select {
case fetcher := <-m.retrievers:
allocs++
fetcher <- bit
default:
// No retrievers active, start listening for new ones
retrievers = m.retrievers
unallocs[bit] = struct{}{}
}
}
for {
select {
case <-shutdown:
// Graceful shutdown requested, wait until all pending requests are honoured
if allocs == 0 {
return
}
shutdown = nil
case <-session.kill:
// Pending requests not honoured in time, hard terminate
return
case req := <-dist:
// New retrieval request arrived to be distributed to some fetcher process
queue := requests[req.bit]
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
// If it's a new bit and we have waiting fetchers, allocate to them
if len(queue) == 0 {
assign(req.bit)
}
case fetcher := <-retrievers:
// New retriever arrived, find the lowest section-ed bit to assign
bit, best := uint(0), uint64(math.MaxUint64)
for idx := range unallocs {
if requests[idx][0] < best {
bit, best = idx, requests[idx][0]
}
}
// Stop tracking this bit (and alloc notifications if no more work is available)
delete(unallocs, bit)
if len(unallocs) == 0 {
retrievers = nil
}
allocs++
fetcher <- bit
case fetcher := <-m.counters:
// New task count request arrives, return number of items
fetcher <- uint(len(requests[<-fetcher]))
case fetcher := <-m.retrievals:
// New fetcher waiting for tasks to retrieve, assign
task := <-fetcher
if want := len(task.Sections); want >= len(requests[task.Bit]) {
task.Sections = requests[task.Bit]
delete(requests, task.Bit)
} else {
task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
}
fetcher <- task
// If anything was left unallocated, try to assign to someone else
if len(requests[task.Bit]) > 0 {
assign(task.Bit)
}
case result := <-m.deliveries:
// New retrieval task response from fetcher, split out missing sections and
// deliver complete ones
var (
sections = make([]uint64, 0, len(result.Sections))
bitsets = make([][]byte, 0, len(result.Bitsets))
missing = make([]uint64, 0, len(result.Sections))
)
for i, bitset := range result.Bitsets {
if len(bitset) == 0 {
missing = append(missing, result.Sections[i])
continue
}
sections = append(sections, result.Sections[i])
bitsets = append(bitsets, bitset)
}
m.schedulers[result.Bit].deliver(sections, bitsets)
allocs--
// Reschedule missing sections and allocate bit if newly available
if len(missing) > 0 {
queue := requests[result.Bit]
for _, section := range missing {
index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
}
requests[result.Bit] = queue
if len(queue) == len(missing) {
assign(result.Bit)
}
}
// If we're in the process of shutting down, terminate
if allocs == 0 && shutdown == nil {
return
}
}
}
}
// MatcherSession is returned by a started matcher to be used as a terminator
// for the actively running matching operation.
type MatcherSession struct {
matcher *Matcher
quit chan struct{} // Quit channel to request pipeline termination
kill chan struct{} // Term channel to signal non-graceful forced shutdown
pend sync.WaitGroup
}
// Close stops the matching process and waits for all subprocesses to terminate
// before returning. The timeout may be used for graceful shutdown, allowing the
// currently running retrievals to complete before this time.
func (s *MatcherSession) Close(timeout time.Duration) {
// Bail out if the matcher is not running
select {
case <-s.quit:
return
default:
}
// Signal termination and wait for all goroutines to tear down
close(s.quit)
time.AfterFunc(timeout, func() { close(s.kill) })
s.pend.Wait()
}
// AllocateRetrieval assigns a bloom bit index to a client process that can either
// immediately request and fetch the section contents assigned to this bit or wait
// a little while for more sections to be requested.
func (s *MatcherSession) AllocateRetrieval() (uint, bool) {
fetcher := make(chan uint)
select {
case <-s.quit:
return 0, false
case s.matcher.retrievers <- fetcher:
bit, ok := <-fetcher
return bit, ok
}
}
// PendingSections returns the number of pending section retrievals belonging to
// the given bloom bit index.
func (s *MatcherSession) PendingSections(bit uint) int {
fetcher := make(chan uint)
select {
case <-s.quit:
return 0
case s.matcher.counters <- fetcher:
fetcher <- bit
return int(<-fetcher)
}
}
// AllocateSections assigns all or part of an already allocated bit-task queue
// to the requesting process.
func (s *MatcherSession) AllocateSections(bit uint, count int) []uint64 {
fetcher := make(chan *Retrieval)
select {
case <-s.quit:
return nil
case s.matcher.retrievals <- fetcher:
task := &Retrieval{
Bit: bit,
Sections: make([]uint64, count),
}
fetcher <- task
return (<-fetcher).Sections
}
}
// DeliverSections delivers a batch of section bit-vectors for a specific bloom
// bit index to be injected into the processing pipeline.
func (s *MatcherSession) DeliverSections(bit uint, sections []uint64, bitsets [][]byte) {
select {
case <-s.kill:
return
case s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}:
}
}
// Multiplex polls the matcher session for retrieval tasks and multiplexes them into
// the requested retrieval queue to be serviced together with other sessions.
//
// This method will block for the lifetime of the session. Even after termination
// of the session, any in-flight requests need to be responded to! Empty responses
// are fine though in that case.
func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
for {
// Allocate a new bloom bit index to retrieve data for, stopping when done
bit, ok := s.AllocateRetrieval()
if !ok {
return
}
// Bit allocated, throttle a bit if we're below our batch limit
if s.PendingSections(bit) < batch {
select {
case <-s.quit:
// Session terminating, we can't meaningfully service, abort
s.AllocateSections(bit, 0)
s.DeliverSections(bit, []uint64{}, [][]byte{})
return
case <-time.After(wait):
// Throttling up, fetch whatever's available
}
}
// Allocate as much as we can handle and request servicing
sections := s.AllocateSections(bit, batch)
request := make(chan *Retrieval)
select {
case <-s.quit:
// Session terminating, we can't meaningfully service, abort
s.DeliverSections(bit, sections, make([][]byte, len(sections)))
return
case mux <- request:
// Retrieval accepted, something must arrive before we're aborting
request <- &Retrieval{Bit: bit, Sections: sections}
result := <-request
s.DeliverSections(result.Bit, result.Sections, result.Bitsets)
}
}
}
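To show how these pieces fit together end to end, here is a hedged usage sketch (not part of this diff): build a matcher from raw address/topic clauses, start a session over a block range, service its retrieval tasks through Multiplex, and drain the matching block numbers. The loadVector helper and the 4096-block section size are assumptions for illustration.
package main
import (
"time"
"github.com/ethereum/go-ethereum/core/bloombits"
)
// runFilter drives a matcher over [begin, end]; loadVector is a hypothetical
// helper standing in for a database or network lookup of a rotated bit vector.
func runFilter(filters [][][]byte, begin, end uint64, loadVector func(bit uint, section uint64) []byte) ([]uint64, error) {
matcher := bloombits.NewMatcher(4096, filters) // section size assumed for illustration
results := make(chan uint64, 64)
session, err := matcher.Start(begin, end, results)
if err != nil {
return nil, err
}
defer session.Close(time.Second)
// Service retrieval tasks: Multiplex hands out batches of sections per bit.
requests := make(chan chan *bloombits.Retrieval)
go session.Multiplex(16, 100*time.Microsecond, requests)
go func() {
for request := range requests {
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
task.Bitsets[i] = loadVector(task.Bit, section)
}
request <- task
}
}()
// Drain the matching block numbers until the range is exhausted.
var matches []uint64
for number := range results {
matches = append(matches, number)
}
return matches, nil
}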

@@ -0,0 +1,242 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"math/rand"
"sync/atomic"
"testing"
"time"
)
const testSectionSize = 4096
// Tests the matcher pipeline on a single continuous workflow without interrupts.
func TestMatcherContinuous(t *testing.T) {
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 100000, false, 75)
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 100000, false, 81)
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, false, 36)
}
// Tests the matcher pipeline on a constantly interrupted and resumed work pattern
// with the aim of ensuring data items are requested only once.
func TestMatcherIntermittent(t *testing.T) {
testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 100000, true, 75)
testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 100000, true, 81)
testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 10000, true, 36)
}
// Tests the matcher pipeline on random input to hopefully catch anomalies.
func TestMatcherRandom(t *testing.T) {
for i := 0; i < 10; i++ {
testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 10000, 0)
testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 10000, 0)
testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 10000, 0)
testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 10000, 0)
testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 10000, 0)
}
}
// Tests that matching on everything doesn't crash (special case internally).
func TestWildcardMatcher(t *testing.T) {
testMatcherBothModes(t, nil, 10000, 0)
}
// makeRandomIndexes generates a random filter system, composed of multiple filter
// criteria, each having one bloom list component for the address and arbitrarily
// many topic bloom list components.
func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
res := make([][]bloomIndexes, len(lengths))
for i, topics := range lengths {
res[i] = make([]bloomIndexes, topics)
for j := 0; j < topics; j++ {
for k := 0; k < len(res[i][j]); k++ {
res[i][j][k] = uint(rand.Intn(max-1) + 2)
}
}
}
return res
}
// testMatcherDiffBatches runs the given matcher test in single-delivery and also
// in batched delivery mode, verifying that all kinds of deliveries are handled
// correctly within it.
func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, blocks uint64, intermittent bool, retrievals uint32) {
singleton := testMatcher(t, filter, blocks, intermittent, retrievals, 1)
batched := testMatcher(t, filter, blocks, intermittent, retrievals, 16)
if singleton != batched {
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in singleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched)
}
}
// testMatcherBothModes runs the given matcher test in both continuous and
// intermittent mode, verifying that the request counts match each other.
func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, blocks uint64, retrievals uint32) {
continuous := testMatcher(t, filter, blocks, false, retrievals, 16)
intermittent := testMatcher(t, filter, blocks, true, retrievals, 16)
if continuous != intermittent {
t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. %v in intermittent mode", filter, blocks, continuous, intermittent)
}
}
// testMatcher is a generic tester to run the given matcher test and return the
// number of requests made for cross validation between different modes.
func testMatcher(t *testing.T, filter [][]bloomIndexes, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
// Create a new matcher and simulate our explicit random bitsets
matcher := NewMatcher(testSectionSize, nil)
matcher.filters = filter
for _, rule := range filter {
for _, topic := range rule {
for _, bit := range topic {
matcher.addScheduler(bit)
}
}
}
// Track the number of retrieval requests made
var requested uint32
// Start the matching session for the filter and the retriever goroutines
quit := make(chan struct{})
matches := make(chan uint64, 16)
session, err := matcher.Start(0, blocks-1, matches)
if err != nil {
t.Fatalf("failed to start matcher session: %v", err)
}
startRetrievers(session, quit, &requested, maxReqCount)
// Iterate over all the blocks and verify that the pipeline produces the correct matches
for i := uint64(0); i < blocks; i++ {
if expMatch3(filter, i) {
match, ok := <-matches
if !ok {
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i)
return 0
}
if match != i {
t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match)
}
// If we're testing intermittent mode, abort and restart the pipeline
if intermittent {
session.Close(time.Second)
close(quit)
quit = make(chan struct{})
matches = make(chan uint64, 16)
session, err = matcher.Start(i+1, blocks-1, matches)
if err != nil {
t.Fatalf("failed to start matcher session: %v", err)
}
startRetrievers(session, quit, &requested, maxReqCount)
}
}
}
// Ensure the result channel is torn down after the last block
match, ok := <-matches
if ok {
t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match)
}
// Clean up the session and ensure we match the expected retrieval count
session.Close(time.Second)
close(quit)
if retrievals != 0 && requested != retrievals {
t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested, retrievals)
}
return requested
}
// startRetrievers starts a batch of goroutines listening for section requests
// and serving them.
func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *uint32, batch int) {
requests := make(chan chan *Retrieval)
for i := 0; i < 10; i++ {
// Start a multiplexer to test multiple threaded execution
go session.Multiplex(batch, 100*time.Microsecond, requests)
// Start a servicer to match the above multiplexer
go func() {
for {
// Wait for a service request or a shutdown
select {
case <-quit:
return
case request := <-requests:
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
task.Bitsets[i] = generateBitset(task.Bit, section)
atomic.AddUint32(retrievals, 1)
}
}
request <- task
}
}
}()
}
}
// generateBitset generates the rotated bitset for the given bloom bit and section
// numbers.
func generateBitset(bit uint, section uint64) []byte {
bitset := make([]byte, testSectionSize/8)
for i := 0; i < len(bitset); i++ {
for b := 0; b < 8; b++ {
blockIdx := section*testSectionSize + uint64(i*8+b)
bitset[i] += bitset[i]
if (blockIdx % uint64(bit)) == 0 {
bitset[i]++
}
}
}
return bitset
}
func expMatch1(filter bloomIndexes, i uint64) bool {
for _, ii := range filter {
if (i % uint64(ii)) != 0 {
return false
}
}
return true
}
func expMatch2(filter []bloomIndexes, i uint64) bool {
for _, ii := range filter {
if expMatch1(ii, i) {
return true
}
}
return false
}
func expMatch3(filter [][]bloomIndexes, i uint64) bool {
for _, ii := range filter {
if !expMatch2(ii, i) {
return false
}
}
return true
}

@@ -0,0 +1,181 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"sync"
)
// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
section uint64 // Section index to retrieve the bit-vector from
bit uint // Bit index within the section to retrieve the vector of
}
// response represents the state of a requested bit-vector through a scheduler.
type response struct {
cached []byte // Cached bits to dedup multiple requests
done chan struct{} // Channel to allow waiting for completion
}
// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
bit uint // Index of the bit in the bloom filter this scheduler is responsible for
responses map[uint64]*response // Currently pending retrieval requests or already cached responses
lock sync.Mutex // Lock protecting the responses from concurrent access
}
// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
return &scheduler{
bit: idx,
responses: make(map[uint64]*response),
}
}
// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Create a forwarder channel between requests and responses of the same size as
// the distribution channel (since that will block the pipeline anyway).
pend := make(chan uint64, cap(dist))
// Start the pipeline schedulers to forward between user -> distributor -> user
wg.Add(2)
go s.scheduleRequests(sections, dist, pend, quit, wg)
go s.scheduleDeliveries(pend, done, quit, wg)
}
// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
s.lock.Lock()
defer s.lock.Unlock()
for section, res := range s.responses {
if res.cached == nil {
delete(s.responses, section)
}
}
}
// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(pend)
// Keep reading and scheduling section requests
for {
select {
case <-quit:
return
case section, ok := <-reqs:
// New section retrieval requested
if !ok {
return
}
// Deduplicate retrieval requests
unique := false
s.lock.Lock()
if s.responses[section] == nil {
s.responses[section] = &response{
done: make(chan struct{}),
}
unique = true
}
s.lock.Unlock()
// Schedule the section for retrieval and notify the deliverer to expect this section
if unique {
select {
case <-quit:
return
case dist <- &request{bit: s.bit, section: section}:
}
}
select {
case <-quit:
return
case pend <- section:
}
}
}
}
// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
// Clean up the goroutine and pipeline when done
defer wg.Done()
defer close(done)
// Keep reading notifications and scheduling deliveries
for {
select {
case <-quit:
return
case idx, ok := <-pend:
// New section retrieval pending
if !ok {
return
}
// Wait until the request is honoured
s.lock.Lock()
res := s.responses[idx]
s.lock.Unlock()
select {
case <-quit:
return
case <-res.done:
}
// Deliver the result
select {
case <-quit:
return
case done <- res.cached:
}
}
}
}
// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
s.lock.Lock()
defer s.lock.Unlock()
for i, section := range sections {
if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
res.cached = data[i]
close(res.done)
}
}
}

@@ -0,0 +1,105 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bloombits
import (
"bytes"
"math/big"
"math/rand"
"sync"
"sync/atomic"
"testing"
"time"
)
// Tests that the scheduler can deduplicate and forward retrieval requests to
// underlying fetchers and serve responses back, regardless of the concurrency
// of the requesting clients or serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T) { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T) { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T) { testScheduler(t, 10, 10, 5000) }
func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
f := newScheduler(0)
// Create a batch of handler goroutines that respond to bloom bit requests and
// deliver them to the scheduler.
var fetchPend sync.WaitGroup
fetchPend.Add(fetchers)
defer fetchPend.Wait()
fetch := make(chan *request, 16)
defer close(fetch)
var delivered uint32
for i := 0; i < fetchers; i++ {
go func() {
defer fetchPend.Done()
for req := range fetch {
time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond))))
atomic.AddUint32(&delivered, 1)
f.deliver([]uint64{
req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
req.section, // Requested data
req.section, // Duplicated data (ensure it doesn't double close anything)
}, [][]byte{
[]byte{},
new(big.Int).SetUint64(req.section).Bytes(),
new(big.Int).SetUint64(req.section).Bytes(),
})
}
}()
}
// Start a batch of goroutines to concurrently run scheduling tasks
quit := make(chan struct{})
var pend sync.WaitGroup
pend.Add(clients)
for i := 0; i < clients; i++ {
go func() {
defer pend.Done()
in := make(chan uint64, 16)
out := make(chan []byte, 16)
f.run(in, fetch, out, quit, &pend)
go func() {
for j := 0; j < requests; j++ {
in <- uint64(j)
}
close(in)
}()
for j := 0; j < requests; j++ {
bits := <-out
if want := new(big.Int).SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
}
}
}()
}
pend.Wait()
if have := atomic.LoadUint32(&delivered); int(have) != requests {
t.Errorf("request count mismatch: have %v, want %v", have, requests)
}
}

@@ -42,9 +42,8 @@ type ChainIndexerBackend interface {
// will ensure a sequential order of headers.
Process(header *types.Header)
-// Commit finalizes the section metadata and stores it into the database. This
-// interface will usually be a batch writer.
-Commit(db ethdb.Database) error
+// Commit finalizes the section metadata and stores it into the database.
+Commit() error
}
// ChainIndexer does a post-processing job for equally sized sections of the
@@ -102,9 +101,10 @@ func NewChainIndexer(chainDb, indexDb ethdb.Database, backend ChainIndexerBacken
}
// Start creates a goroutine to feed chain head events into the indexer for
-// cascading background processing.
-func (c *ChainIndexer) Start(currentHeader *types.Header, eventMux *event.TypeMux) {
-go c.eventLoop(currentHeader, eventMux)
+// cascading background processing. Children do not need to be started, they
+// are notified about new events by their parents.
+func (c *ChainIndexer) Start(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
+go c.eventLoop(currentHeader, chainEventer)
}
// Close tears down all goroutines belonging to the indexer and returns any error
@@ -125,6 +125,12 @@ func (c *ChainIndexer) Close() error {
errs = append(errs, err)
}
}
+// Close all children
+for _, child := range c.children {
+if err := child.Close(); err != nil {
+errs = append(errs, err)
+}
+}
// Return any failures
switch {
case len(errs) == 0:
@@ -141,12 +147,12 @@ func (c *ChainIndexer) Close() error {
// eventLoop is a secondary - optional - event loop of the indexer which is only
// started for the outermost indexer to push chain head events into a processing
// queue.
-func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.TypeMux) {
+func (c *ChainIndexer) eventLoop(currentHeader *types.Header, chainEventer func(ch chan<- ChainEvent) event.Subscription) {
// Mark the chain indexer as active, requiring an additional teardown
atomic.StoreUint32(&c.active, 1)
-// Subscribe to chain head events
-sub := eventMux.Subscribe(ChainEvent{})
+events := make(chan ChainEvent, 10)
+sub := chainEventer(events)
defer sub.Unsubscribe()
// Fire the initial new head event to start any outstanding processing
@@ -163,14 +169,14 @@ func (c *ChainIndexer) eventLoop(currentHeader *types.Header, eventMux *event.Ty
errc <- nil
return
-case ev, ok := <-sub.Chan():
+case ev, ok := <-events:
// Received a new event, ensure it's not nil (closing) and update
if !ok {
errc := <-c.quit
errc <- nil
return
}
-header := ev.Data.(ChainEvent).Block.Header()
+header := ev.Block.Header()
if header.ParentHash != prevHash {
c.newHead(FindCommonAncestor(c.chainDb, prevHeader, header).Number.Uint64(), true)
}
@@ -226,8 +232,10 @@ func (c *ChainIndexer) newHead(head uint64, reorg bool) {
// updateLoop is the main event loop of the indexer which pushes chain segments
// down into the processing backend.
func (c *ChainIndexer) updateLoop() {
-var updated time.Time
+var (
+updating bool
+updated time.Time
+)
for {
select {
case errc := <-c.quit:
@@ -242,6 +250,7 @@ func (c *ChainIndexer) updateLoop() {
// Periodically print an upgrade log message to the user
if time.Since(updated) > 8*time.Second {
if c.knownSections > c.storedSections+1 {
+updating = true
c.log.Info("Upgrading chain index", "percentage", c.storedSections*100/c.knownSections)
}
updated = time.Now()
@@ -255,12 +264,19 @@ func (c *ChainIndexer) updateLoop() {
// Process the newly defined section in the background
c.lock.Unlock()
newHead, err := c.processSection(section, oldHead)
+if err != nil {
+c.log.Error("Section processing failed", "error", err)
+}
c.lock.Lock()
// If processing succeeded and no reorgs occcurred, mark the section completed
if err == nil && oldHead == c.sectionHead(section-1) {
c.setSectionHead(section, newHead)
c.setValidSections(section + 1)
+if c.storedSections == c.knownSections && updating {
+updating = false
+c.log.Info("Finished upgrading chain index")
+}
c.cascadedHead = c.storedSections*c.sectionSize - 1
for _, child := range c.children {
@@ -311,7 +327,8 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
c.backend.Process(header)
lastHead = header.Hash()
}
-if err := c.backend.Commit(c.chainDb); err != nil {
+if err := c.backend.Commit(); err != nil {
+c.log.Error("Section commit failed", "error", err)
return common.Hash{}, err
}
return lastHead, nil
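For context on the interface change above, here is a hedged sketch of a backend written against the new Commit signature. It is illustrative only, not the PR's actual BloomIndexer from eth/bloombits.go, and it assumes a 4096-block section size and that the interface's other methods (such as Reset) are unchanged by this PR.
package sketch
import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
)
const sectionSize = 4096 // assumed section size, for illustration only
// sketchBloomIndexer is an illustrative backend: it rotates the bloom of every
// processed header with a bloombits.Generator and would persist the resulting
// bit vectors when the section is committed.
type sketchBloomIndexer struct {
gen     *bloombits.Generator
section uint64
head    common.Hash
}
func (b *sketchBloomIndexer) Reset(section uint64) {
b.gen, _ = bloombits.NewGenerator(sectionSize)
b.section, b.head = section, common.Hash{}
}
func (b *sketchBloomIndexer) Process(header *types.Header) {
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*sectionSize), header.Bloom)
b.head = header.Hash()
}
func (b *sketchBloomIndexer) Commit() error {
for bit := uint(0); bit < types.BloomBitLength; bit++ {
bits, err := b.gen.Bitset(bit)
if err != nil {
return err
}
_ = bits // writing the vector under its bloom-bits key is elided here
}
return nil
}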

@@ -58,7 +58,6 @@ func testChainIndexer(t *testing.T, count int) {
)
backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)}
backends[i].indexer = NewChainIndexer(db, ethdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i))
-defer backends[i].indexer.Close()
if sections, _, _ := backends[i].indexer.Sections(); sections != 0 {
t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0)
@@ -67,6 +66,7 @@ func testChainIndexer(t *testing.T, count int) {
backends[i-1].indexer.AddChildIndexer(backends[i].indexer)
}
}
+defer backends[0].indexer.Close() // parent indexer shuts down children
// notify pings the root indexer about a new head or reorg, then expect
// processed blocks if a section is processable
notify := func(headNum, failNum uint64, reorg bool) {
@@ -226,7 +226,7 @@ func (b *testChainIndexBackend) Process(header *types.Header) {
}
}
-func (b *testChainIndexBackend) Commit(db ethdb.Database) error {
+func (b *testChainIndexBackend) Commit() error {
if b.headerCnt != b.indexer.sectionSize {
b.t.Error("Not enough headers processed")
}

@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"math/big"
-"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
@@ -34,24 +33,41 @@ import (
"github.com/ethereum/go-ethereum/rlp"
)
+// DatabaseReader wraps the Get method of a backing data store.
+type DatabaseReader interface {
+Get(key []byte) (value []byte, err error)
+}
+// DatabaseWriter wraps the Put method of a backing data store.
+type DatabaseWriter interface {
+Put(key, value []byte) error
+}
+// DatabaseDeleter wraps the Delete method of a backing data store.
+type DatabaseDeleter interface {
+Delete(key []byte) error
+}
var (
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")
+// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
tdSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + tdSuffix -> td
numSuffix = []byte("n") // headerPrefix + num (uint64 big endian) + numSuffix -> hash
blockHashPrefix = []byte("H") // blockHashPrefix + hash -> num (uint64 big endian)
bodyPrefix = []byte("b") // bodyPrefix + num (uint64 big endian) + hash -> block body
blockReceiptsPrefix = []byte("r") // blockReceiptsPrefix + num (uint64 big endian) + hash -> block receipts
lookupPrefix = []byte("l") // lookupPrefix + hash -> transaction/receipt lookup metadata
+bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits
preimagePrefix = "secure-key-" // preimagePrefix + hash -> preimage
-mipmapPre = []byte("mipmap-log-bloom-")
-MIPMapLevels = []uint64{1000000, 500000, 100000, 50000, 1000}
configPrefix = []byte("ethereum-config-") // config prefix for the db
+// Chain index prefixes (use `i` + single byte to avoid mixing data types).
+BloomBitsIndexPrefix = []byte("iB") // BloomBitsIndexPrefix is the data table of a chain indexer to track its progress
// used by old db, now only used for conversion
oldReceiptsPrefix = []byte("receipts-")
@@ -59,8 +75,6 @@ var (
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
-mipmapBloomMu sync.Mutex // protect against race condition when updating mipmap blooms
preimageCounter = metrics.NewCounter("db/preimage/total")
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
)
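The bloomBitsPrefix comment above implies the key layout used for stored bit vectors; the helper below is a hypothetical sketch of composing such a key, not code from this diff.
// bloomBitsKey is a hypothetical helper showing the key layout implied by the
// bloomBitsPrefix comment: prefix "B", a 2-byte big-endian bit index, an 8-byte
// big-endian section index, then the block hash. It assumes "encoding/binary"
// and the common package are imported.
func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
return key
}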
@ -81,7 +95,7 @@ func encodeBlockNumber(number uint64) []byte {
} }
// GetCanonicalHash retrieves a hash assigned to a canonical block number. // GetCanonicalHash retrieves a hash assigned to a canonical block number.
func GetCanonicalHash(db ethdb.Database, number uint64) common.Hash { func GetCanonicalHash(db DatabaseReader, number uint64) common.Hash {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)) data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
if len(data) == 0 { if len(data) == 0 {
return common.Hash{} return common.Hash{}
@ -95,7 +109,7 @@ const missingNumber = uint64(0xffffffffffffffff)
// GetBlockNumber returns the block number assigned to a block hash // GetBlockNumber returns the block number assigned to a block hash
// if the corresponding header is present in the database // if the corresponding header is present in the database
func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 { func GetBlockNumber(db DatabaseReader, hash common.Hash) uint64 {
data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...)) data, _ := db.Get(append(blockHashPrefix, hash.Bytes()...))
if len(data) != 8 { if len(data) != 8 {
return missingNumber return missingNumber
@ -108,7 +122,7 @@ func GetBlockNumber(db ethdb.Database, hash common.Hash) uint64 {
// last block hash is only updated upon a full block import, the last header // last block hash is only updated upon a full block import, the last header
// hash is updated already at header import, allowing head tracking for the // hash is updated already at header import, allowing head tracking for the
// light synchronization mechanism. // light synchronization mechanism.
func GetHeadHeaderHash(db ethdb.Database) common.Hash { func GetHeadHeaderHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headHeaderKey) data, _ := db.Get(headHeaderKey)
if len(data) == 0 { if len(data) == 0 {
return common.Hash{} return common.Hash{}
@ -117,7 +131,7 @@ func GetHeadHeaderHash(db ethdb.Database) common.Hash {
} }
// GetHeadBlockHash retrieves the hash of the current canonical head block. // GetHeadBlockHash retrieves the hash of the current canonical head block.
func GetHeadBlockHash(db ethdb.Database) common.Hash { func GetHeadBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headBlockKey) data, _ := db.Get(headBlockKey)
if len(data) == 0 { if len(data) == 0 {
return common.Hash{} return common.Hash{}
@ -129,7 +143,7 @@ func GetHeadBlockHash(db ethdb.Database) common.Hash {
// fast synchronization. The difference between this and GetHeadBlockHash is that // fast synchronization. The difference between this and GetHeadBlockHash is that
// whereas the last block hash is only updated upon a full block import, the last // whereas the last block hash is only updated upon a full block import, the last
// fast hash is updated when importing pre-processed blocks. // fast hash is updated when importing pre-processed blocks.
func GetHeadFastBlockHash(db ethdb.Database) common.Hash { func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
data, _ := db.Get(headFastKey) data, _ := db.Get(headFastKey)
if len(data) == 0 { if len(data) == 0 {
return common.Hash{} return common.Hash{}
@ -139,14 +153,14 @@ func GetHeadFastBlockHash(db ethdb.Database) common.Hash {
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil // GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found. // if the header's not found.
func GetHeaderRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue { func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
return data return data
} }
// GetHeader retrieves the block header corresponding to the hash, nil if none // GetHeader retrieves the block header corresponding to the hash, nil if none
// found. // found.
func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header { func GetHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Header {
data := GetHeaderRLP(db, hash, number) data := GetHeaderRLP(db, hash, number)
if len(data) == 0 { if len(data) == 0 {
return nil return nil
@ -160,14 +174,14 @@ func GetHeader(db ethdb.Database, hash common.Hash, number uint64) *types.Header
} }
// GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. // GetBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func GetBodyRLP(db ethdb.Database, hash common.Hash, number uint64) rlp.RawValue { func GetBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) data, _ := db.Get(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
return data return data
} }
// GetBody retrieves the block body (transactions, uncles) corresponding to the // GetBody retrieves the block body (transactions, uncles) corresponding to the
// hash, nil if none found. // hash, nil if none found.
func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body { func GetBody(db DatabaseReader, hash common.Hash, number uint64) *types.Body {
data := GetBodyRLP(db, hash, number) data := GetBodyRLP(db, hash, number)
if len(data) == 0 { if len(data) == 0 {
return nil return nil
@ -182,7 +196,7 @@ func GetBody(db ethdb.Database, hash common.Hash, number uint64) *types.Body {
// GetTd retrieves a block's total difficulty corresponding to the hash, nil if // GetTd retrieves a block's total difficulty corresponding to the hash, nil if
// none found. // none found.
func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int { func GetTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int {
data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...)) data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), tdSuffix...))
if len(data) == 0 { if len(data) == 0 {
return nil return nil
@ -201,7 +215,7 @@ func GetTd(db ethdb.Database, hash common.Hash, number uint64) *big.Int {
// //
// Note, due to concurrent download of header and block body the header and thus // Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet). // canonical hash can be stored in the database but the body data not (yet).
func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block { func GetBlock(db DatabaseReader, hash common.Hash, number uint64) *types.Block {
// Retrieve the block header and body contents // Retrieve the block header and body contents
header := GetHeader(db, hash, number) header := GetHeader(db, hash, number)
if header == nil { if header == nil {
@ -217,7 +231,7 @@ func GetBlock(db ethdb.Database, hash common.Hash, number uint64) *types.Block {
// GetBlockReceipts retrieves the receipts generated by the transactions included // GetBlockReceipts retrieves the receipts generated by the transactions included
// in a block given by its hash. // in a block given by its hash.
func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.Receipts { func GetBlockReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts {
data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...)) data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...))
if len(data) == 0 { if len(data) == 0 {
return nil return nil
@ -236,7 +250,7 @@ func GetBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) types.
// GetTxLookupEntry retrieves the positional metadata associated with a transaction // GetTxLookupEntry retrieves the positional metadata associated with a transaction
// hash to allow retrieving the transaction or receipt by hash. // hash to allow retrieving the transaction or receipt by hash.
func GetTxLookupEntry(db ethdb.Database, hash common.Hash) (common.Hash, uint64, uint64) { func GetTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) {
// Load the positional metadata from disk and bail if it fails // Load the positional metadata from disk and bail if it fails
data, _ := db.Get(append(lookupPrefix, hash.Bytes()...)) data, _ := db.Get(append(lookupPrefix, hash.Bytes()...))
if len(data) == 0 { if len(data) == 0 {
@ -253,7 +267,7 @@ func GetTxLookupEntry(db ethdb.Database, hash common.Hash) (common.Hash, uint64,
// GetTransaction retrieves a specific transaction from the database, along with // GetTransaction retrieves a specific transaction from the database, along with
// its added positional metadata. // its added positional metadata.
func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) { func GetTransaction(db DatabaseReader, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the transaction from the body // Retrieve the lookup metadata and resolve the transaction from the body
blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash) blockHash, blockNumber, txIndex := GetTxLookupEntry(db, hash)
@ -288,7 +302,7 @@ func GetTransaction(db ethdb.Database, hash common.Hash) (*types.Transaction, co
// GetReceipt retrieves a specific transaction receipt from the database, along with // GetReceipt retrieves a specific transaction receipt from the database, along with
// its added positional metadata. // its added positional metadata.
func GetReceipt(db ethdb.Database, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) { func GetReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
// Retrieve the lookup metadata and resolve the receipt from the receipts // Retrieve the lookup metadata and resolve the receipt from the receipts
blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash) blockHash, blockNumber, receiptIndex := GetTxLookupEntry(db, hash)
@ -313,8 +327,20 @@ func GetReceipt(db ethdb.Database, hash common.Hash) (*types.Receipt, common.Has
return (*types.Receipt)(&receipt), common.Hash{}, 0, 0 return (*types.Receipt)(&receipt), common.Hash{}, 0, 0
} }
// GetBloomBits retrieves the compressed bloom bit vector belonging to the given
// section and bit index from the database.
func GetBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) []byte {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
bits, _ := db.Get(key)
return bits
}
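GetBloomBits composes a 43-byte key: the one-byte "B" prefix, the bloom bit index as a big-endian uint16, the section number as a big-endian uint64, and the 32-byte hash of the section's last canonical header (the handler added in eth/bloombits.go below derives that hash with GetCanonicalHash). A standalone sketch of the layout; bloomBitsKey is a hypothetical helper, not part of the diff:

package main

import (
	"encoding/binary"
	"fmt"
)

// bloomBitsKey reproduces the key layout built inline above:
// 1-byte prefix, 2-byte bit index, 8-byte section number, 32-byte section head hash.
func bloomBitsKey(bit uint, section uint64, head [32]byte) []byte {
	key := make([]byte, 1+2+8+32)
	key[0] = 'B' // bloomBitsPrefix
	binary.BigEndian.PutUint16(key[1:3], uint16(bit))
	binary.BigEndian.PutUint64(key[3:11], section)
	copy(key[11:], head[:])
	return key
}

func main() {
	var head [32]byte
	fmt.Printf("% x\n", bloomBitsKey(5, 42, head)) // 43 bytes in total
}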
// WriteCanonicalHash stores the canonical hash for the given block number. // WriteCanonicalHash stores the canonical hash for the given block number.
func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) error { func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) error {
key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...) key := append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)
if err := db.Put(key, hash.Bytes()); err != nil { if err := db.Put(key, hash.Bytes()); err != nil {
log.Crit("Failed to store number to hash mapping", "err", err) log.Crit("Failed to store number to hash mapping", "err", err)
@ -323,7 +349,7 @@ func WriteCanonicalHash(db ethdb.Database, hash common.Hash, number uint64) erro
} }
// WriteHeadHeaderHash stores the head header's hash. // WriteHeadHeaderHash stores the head header's hash.
func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error { func WriteHeadHeaderHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil { if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last header's hash", "err", err) log.Crit("Failed to store last header's hash", "err", err)
} }
@ -331,7 +357,7 @@ func WriteHeadHeaderHash(db ethdb.Database, hash common.Hash) error {
} }
// WriteHeadBlockHash stores the head block's hash. // WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error { func WriteHeadBlockHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headBlockKey, hash.Bytes()); err != nil { if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last block's hash", "err", err) log.Crit("Failed to store last block's hash", "err", err)
} }
@ -339,7 +365,7 @@ func WriteHeadBlockHash(db ethdb.Database, hash common.Hash) error {
} }
// WriteHeadFastBlockHash stores the fast head block's hash. // WriteHeadFastBlockHash stores the fast head block's hash.
func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error { func WriteHeadFastBlockHash(db DatabaseWriter, hash common.Hash) error {
if err := db.Put(headFastKey, hash.Bytes()); err != nil { if err := db.Put(headFastKey, hash.Bytes()); err != nil {
log.Crit("Failed to store last fast block's hash", "err", err) log.Crit("Failed to store last fast block's hash", "err", err)
} }
@ -347,7 +373,7 @@ func WriteHeadFastBlockHash(db ethdb.Database, hash common.Hash) error {
} }
// WriteHeader serializes a block header into the database. // WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Database, header *types.Header) error { func WriteHeader(db DatabaseWriter, header *types.Header) error {
data, err := rlp.EncodeToBytes(header) data, err := rlp.EncodeToBytes(header)
if err != nil { if err != nil {
return err return err
@ -367,7 +393,7 @@ func WriteHeader(db ethdb.Database, header *types.Header) error {
} }
// WriteBody serializes the body of a block into the database. // WriteBody serializes the body of a block into the database.
func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.Body) error { func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.Body) error {
data, err := rlp.EncodeToBytes(body) data, err := rlp.EncodeToBytes(body)
if err != nil { if err != nil {
return err return err
@ -376,7 +402,7 @@ func WriteBody(db ethdb.Database, hash common.Hash, number uint64, body *types.B
} }
// WriteBodyRLP writes a serialized body of a block into the database. // WriteBodyRLP writes a serialized body of a block into the database.
func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.RawValue) error { func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) error {
key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) key := append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)
if err := db.Put(key, rlp); err != nil { if err := db.Put(key, rlp); err != nil {
log.Crit("Failed to store block body", "err", err) log.Crit("Failed to store block body", "err", err)
@ -385,7 +411,7 @@ func WriteBodyRLP(db ethdb.Database, hash common.Hash, number uint64, rlp rlp.Ra
} }
// WriteTd serializes the total difficulty of a block into the database. // WriteTd serializes the total difficulty of a block into the database.
func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) error { func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) error {
data, err := rlp.EncodeToBytes(td) data, err := rlp.EncodeToBytes(td)
if err != nil { if err != nil {
return err return err
@ -398,7 +424,7 @@ func WriteTd(db ethdb.Database, hash common.Hash, number uint64, td *big.Int) er
} }
// WriteBlock serializes a block into the database, header and body separately. // WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.Database, block *types.Block) error { func WriteBlock(db DatabaseWriter, block *types.Block) error {
// Store the body first to retain database consistency // Store the body first to retain database consistency
if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil { if err := WriteBody(db, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return err return err
@ -413,7 +439,7 @@ func WriteBlock(db ethdb.Database, block *types.Block) error {
// WriteBlockReceipts stores all the transaction receipts belonging to a block // WriteBlockReceipts stores all the transaction receipts belonging to a block
// as a single receipt slice. This is used during chain reorganisations for // as a single receipt slice. This is used during chain reorganisations for
// rescheduling dropped transactions. // rescheduling dropped transactions.
func WriteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64, receipts types.Receipts) error { func WriteBlockReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts types.Receipts) error {
// Convert the receipts into their storage form and serialize them // Convert the receipts into their storage form and serialize them
storageReceipts := make([]*types.ReceiptForStorage, len(receipts)) storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
for i, receipt := range receipts { for i, receipt := range receipts {
@ -458,29 +484,42 @@ func WriteTxLookupEntries(db ethdb.Database, block *types.Block) error {
return nil return nil
} }
// WriteBloomBits writes the compressed bloom bits vector belonging to the given
// section and bit index.
func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) {
key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...)
binary.BigEndian.PutUint16(key[1:], uint16(bit))
binary.BigEndian.PutUint64(key[3:], section)
if err := db.Put(key, bits); err != nil {
log.Crit("Failed to store bloom bits", "err", err)
}
}
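The stored value is the bit vector compressed with bitutil.CompressBytes; readers decompress it back to sectionSize/8 bytes, as the retrieval handler in eth/bloombits.go does. A minimal round-trip sketch using only helpers visible in this diff, with a throwaway in-memory database for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()

	// One bit per block: a 4096-block section yields a 512-byte vector per bloom bit.
	bits := make([]byte, 4096/8)
	bits[0] = 0x80 // pretend the first block of the section matched this bloom bit

	head := common.HexToHash("0x01") // stand-in for the section's last canonical header hash
	core.WriteBloomBits(db, 7, 0, head, bitutil.CompressBytes(bits))

	blob, err := bitutil.DecompressBytes(core.GetBloomBits(db, 7, 0, head), len(bits))
	fmt.Println(err, blob[0] == 0x80) // <nil> true
}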
// DeleteCanonicalHash removes the number to hash canonical mapping. // DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.Database, number uint64) { func DeleteCanonicalHash(db DatabaseDeleter, number uint64) {
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...)) db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), numSuffix...))
} }
// DeleteHeader removes all block header data associated with a hash. // DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.Database, hash common.Hash, number uint64) { func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(blockHashPrefix, hash.Bytes()...)) db.Delete(append(blockHashPrefix, hash.Bytes()...))
db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
} }
// DeleteBody removes all block body data associated with a hash. // DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.Database, hash common.Hash, number uint64) { func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) db.Delete(append(append(bodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
} }
// DeleteTd removes all block total difficulty data associated with a hash. // DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.Database, hash common.Hash, number uint64) { func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...)) db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), tdSuffix...))
} }
// DeleteBlock removes all block data associated with a hash. // DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) { func DeleteBlock(db DatabaseDeleter, hash common.Hash, number uint64) {
DeleteBlockReceipts(db, hash, number) DeleteBlockReceipts(db, hash, number)
DeleteHeader(db, hash, number) DeleteHeader(db, hash, number)
DeleteBody(db, hash, number) DeleteBody(db, hash, number)
@ -488,57 +527,15 @@ func DeleteBlock(db ethdb.Database, hash common.Hash, number uint64) {
} }
// DeleteBlockReceipts removes all receipt data associated with a block hash. // DeleteBlockReceipts removes all receipt data associated with a block hash.
func DeleteBlockReceipts(db ethdb.Database, hash common.Hash, number uint64) { func DeleteBlockReceipts(db DatabaseDeleter, hash common.Hash, number uint64) {
db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...))
} }
// DeleteTxLookupEntry removes all transaction data associated with a hash. // DeleteTxLookupEntry removes all transaction data associated with a hash.
func DeleteTxLookupEntry(db ethdb.Database, hash common.Hash) { func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) {
db.Delete(append(lookupPrefix, hash.Bytes()...)) db.Delete(append(lookupPrefix, hash.Bytes()...))
} }
// returns a formatted MIP mapped key by adding prefix, canonical number and level
//
// ex. fn(98, 1000) = (prefix || 1000 || 0)
func mipmapKey(num, level uint64) []byte {
lkey := make([]byte, 8)
binary.BigEndian.PutUint64(lkey, level)
key := new(big.Int).SetUint64(num / level * level)
return append(mipmapPre, append(lkey, key.Bytes()...)...)
}
// WriteMipmapBloom writes each address included in the receipts' logs to the
// MIP bloom bin.
func WriteMipmapBloom(db ethdb.Database, number uint64, receipts types.Receipts) error {
mipmapBloomMu.Lock()
defer mipmapBloomMu.Unlock()
batch := db.NewBatch()
for _, level := range MIPMapLevels {
key := mipmapKey(number, level)
bloomDat, _ := db.Get(key)
bloom := types.BytesToBloom(bloomDat)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
bloom.Add(log.Address.Big())
}
}
batch.Put(key, bloom.Bytes())
}
if err := batch.Write(); err != nil {
return fmt.Errorf("mipmap write fail for: %d: %v", number, err)
}
return nil
}
// GetMipmapBloom returns a bloom filter using the number and level as input
// parameters. For available levels see MIPMapLevels.
func GetMipmapBloom(db ethdb.Database, number, level uint64) types.Bloom {
bloomDat, _ := db.Get(mipmapKey(number, level))
return types.BytesToBloom(bloomDat)
}
// PreimageTable returns a Database instance with the key prefix for preimage entries. // PreimageTable returns a Database instance with the key prefix for preimage entries.
func PreimageTable(db ethdb.Database) ethdb.Database { func PreimageTable(db ethdb.Database) ethdb.Database {
return ethdb.NewTable(db, preimagePrefix) return ethdb.NewTable(db, preimagePrefix)
@ -567,7 +564,7 @@ func WritePreimages(db ethdb.Database, number uint64, preimages map[common.Hash]
} }
// GetBlockChainVersion reads the version number from db. // GetBlockChainVersion reads the version number from db.
func GetBlockChainVersion(db ethdb.Database) int { func GetBlockChainVersion(db DatabaseReader) int {
var vsn uint var vsn uint
enc, _ := db.Get([]byte("BlockchainVersion")) enc, _ := db.Get([]byte("BlockchainVersion"))
rlp.DecodeBytes(enc, &vsn) rlp.DecodeBytes(enc, &vsn)
@ -575,13 +572,13 @@ func GetBlockChainVersion(db ethdb.Database) int {
} }
// WriteBlockChainVersion writes vsn as the version number to db. // WriteBlockChainVersion writes vsn as the version number to db.
func WriteBlockChainVersion(db ethdb.Database, vsn int) { func WriteBlockChainVersion(db DatabaseWriter, vsn int) {
enc, _ := rlp.EncodeToBytes(uint(vsn)) enc, _ := rlp.EncodeToBytes(uint(vsn))
db.Put([]byte("BlockchainVersion"), enc) db.Put([]byte("BlockchainVersion"), enc)
} }
// WriteChainConfig writes the chain config settings to the database. // WriteChainConfig writes the chain config settings to the database.
func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConfig) error { func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConfig) error {
// short circuit and ignore if nil config. GetChainConfig // short circuit and ignore if nil config. GetChainConfig
// will return a default. // will return a default.
if cfg == nil { if cfg == nil {
@ -597,7 +594,7 @@ func WriteChainConfig(db ethdb.Database, hash common.Hash, cfg *params.ChainConf
} }
// GetChainConfig will fetch the network settings based on the given hash. // GetChainConfig will fetch the network settings based on the given hash.
func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, error) { func GetChainConfig(db DatabaseReader, hash common.Hash) (*params.ChainConfig, error) {
jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...)) jsonChainConfig, _ := db.Get(append(configPrefix, hash[:]...))
if len(jsonChainConfig) == 0 { if len(jsonChainConfig) == 0 {
return nil, ErrChainConfigNotFound return nil, ErrChainConfigNotFound
@ -612,7 +609,7 @@ func GetChainConfig(db ethdb.Database, hash common.Hash) (*params.ChainConfig, e
} }
// FindCommonAncestor returns the last common ancestor of two block headers // FindCommonAncestor returns the last common ancestor of two block headers
func FindCommonAncestor(db ethdb.Database, a, b *types.Header) *types.Header { func FindCommonAncestor(db DatabaseReader, a, b *types.Header) *types.Header {
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1) a = GetHeader(db, a.ParentHash, a.Number.Uint64()-1)
if a == nil { if a == nil {

@ -18,17 +18,13 @@ package core
import ( import (
"bytes" "bytes"
"io/ioutil"
"math/big" "math/big"
"os"
"testing" "testing"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/crypto/sha3"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rlp"
) )
@ -390,107 +386,3 @@ func TestBlockReceiptStorage(t *testing.T) {
t.Fatalf("deleted receipts returned: %v", rs) t.Fatalf("deleted receipts returned: %v", rs)
} }
} }
func TestMipmapBloom(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
receipt1 := new(types.Receipt)
receipt1.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
{Address: common.BytesToAddress([]byte("address"))},
}
receipt2 := new(types.Receipt)
receipt2.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
{Address: common.BytesToAddress([]byte("address1"))},
}
WriteMipmapBloom(db, 1, types.Receipts{receipt1})
WriteMipmapBloom(db, 2, types.Receipts{receipt2})
for _, level := range MIPMapLevels {
bloom := GetMipmapBloom(db, 2, level)
if !bloom.Test(new(big.Int).SetBytes([]byte("address1"))) {
t.Error("expected test to be included on level:", level)
}
}
// reset
db, _ = ethdb.NewMemDatabase()
receipt := new(types.Receipt)
receipt.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test"))},
}
WriteMipmapBloom(db, 999, types.Receipts{receipt1})
receipt = new(types.Receipt)
receipt.Logs = []*types.Log{
{Address: common.BytesToAddress([]byte("test 1"))},
}
WriteMipmapBloom(db, 1000, types.Receipts{receipt})
bloom := GetMipmapBloom(db, 1000, 1000)
if bloom.TestBytes([]byte("test")) {
t.Error("test should not have been included")
}
}
func TestMipmapChain(t *testing.T) {
dir, err := ioutil.TempDir("", "mipmap")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
var (
db, _ = ethdb.NewLDBDatabase(dir, 0, 0)
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = common.BytesToAddress([]byte("jeff"))
hash1 = common.BytesToHash([]byte("topic1"))
)
defer db.Close()
gspec := &Genesis{
Config: params.TestChainConfig,
Alloc: GenesisAlloc{addr: {Balance: big.NewInt(1000000)}},
}
genesis := gspec.MustCommit(db)
chain, receipts := GenerateChain(params.TestChainConfig, genesis, db, 1010, func(i int, gen *BlockGen) {
var receipts types.Receipts
switch i {
case 1:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr, Topics: []common.Hash{hash1}}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 1000:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr2}}
gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
}
// store the receipts
WriteMipmapBloom(db, uint64(i+1), receipts)
})
for i, block := range chain {
WriteBlock(db, block)
if err := WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}
bloom := GetMipmapBloom(db, 0, 1000)
if bloom.TestBytes(addr2[:]) {
t.Error("address was included in bloom and should not have")
}
}

@ -28,10 +28,16 @@ type bytesBacked interface {
Bytes() []byte Bytes() []byte
} }
const bloomLength = 256 const (
// BloomByteLength represents the number of bytes used in a header log bloom.
BloomByteLength = 256
// Bloom represents a 256 bit bloom filter. // BloomBitLength represents the number of bits used in a header log bloom.
type Bloom [bloomLength]byte BloomBitLength = 8 * BloomByteLength
)
// Bloom represents a 2048 bit bloom filter.
type Bloom [BloomByteLength]byte
// BytesToBloom converts a byte slice to a bloom filter. // BytesToBloom converts a byte slice to a bloom filter.
// It panics if b is not of suitable size. // It panics if b is not of suitable size.
@ -47,7 +53,7 @@ func (b *Bloom) SetBytes(d []byte) {
if len(b) < len(d) { if len(b) < len(d) {
panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d))) panic(fmt.Sprintf("bloom bytes too big %d %d", len(b), len(d)))
} }
copy(b[bloomLength-len(d):], d) copy(b[BloomByteLength-len(d):], d)
} }
// Add adds d to the filter. Future calls of Test(d) will return true. // Add adds d to the filter. Future calls of Test(d) will return true.
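Exporting BloomBitLength (2048) reflects the transformation this PR is built on: bit i of every header bloom in a section is gathered into its own vector, so a filter only reads the few vectors its query touches. The following sketch is a conceptual transposition, not the actual core/bloombits.Generator, and its bit ordering is an arbitrary illustrative choice:

package main

import "fmt"

const (
	bloomByteLength = 256
	bloomBitLength  = 8 * bloomByteLength
)

type bloom [bloomByteLength]byte

// rotate transposes per-block blooms into per-bit vectors: one vector per bloom
// bit position, one bit per block (packed eight blocks to a byte).
func rotate(blooms []bloom) [bloomBitLength][]byte {
	var out [bloomBitLength][]byte
	for i := range out {
		out[i] = make([]byte, (len(blooms)+7)/8)
	}
	for blockIdx, bl := range blooms {
		for bit := 0; bit < bloomBitLength; bit++ {
			if bl[bit/8]&(1<<uint(7-bit%8)) != 0 {
				out[bit][blockIdx/8] |= 1 << uint(7-blockIdx%8)
			}
		}
	}
	return out
}

func main() {
	blooms := make([]bloom, 16)
	blooms[3][0] = 0x80 // set bloom bit 0 for block 3 of this toy section
	vectors := rotate(blooms)
	fmt.Printf("%08b\n", vectors[0][0]) // 00010000: block 3 is flagged in bit 0's vector
}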

@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -194,3 +195,14 @@ func (b *EthApiBackend) EventMux() *event.TypeMux {
func (b *EthApiBackend) AccountManager() *accounts.Manager { func (b *EthApiBackend) AccountManager() *accounts.Manager {
return b.eth.AccountManager() return b.eth.AccountManager()
} }
func (b *EthApiBackend) BloomStatus() (uint64, uint64) {
sections, _, _ := b.eth.bloomIndexer.Sections()
return params.BloomBitsBlocks, sections
}
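BloomStatus reports the section size and the number of completed sections; their product is the first block not yet covered by the index, which Filter.Logs uses further down to split the search into indexed and unindexed ranges. A tiny sketch of that arithmetic, assuming the 4096-block section size of the new params.BloomBitsBlocks default (not shown in this excerpt):

package main

import "fmt"

func main() {
	// Values BloomStatus might report: section size and completed sections.
	size, sections := uint64(4096), uint64(3)

	indexed := size * sections // first block number NOT covered by the index
	fmt.Println("bloombits index covers blocks 0 through", indexed-1)
}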
func (b *EthApiBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
for i := 0; i < bloomFilterThreads; i++ {
go session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)
}
}

@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/clique"
"github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/downloader"
@ -77,6 +78,9 @@ type Ethereum struct {
engine consensus.Engine engine consensus.Engine
accountManager *accounts.Manager accountManager *accounts.Manager
bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests
bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports
ApiBackend *EthApiBackend ApiBackend *EthApiBackend
miner *miner.Miner miner *miner.Miner
@ -125,11 +129,10 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
networkId: config.NetworkId, networkId: config.NetworkId,
gasPrice: config.GasPrice, gasPrice: config.GasPrice,
etherbase: config.Etherbase, etherbase: config.Etherbase,
bloomRequests: make(chan chan *bloombits.Retrieval),
bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks),
} }
if err := addMipmapBloomBins(chainDb); err != nil {
return nil, err
}
log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId) log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId)
if !config.SkipBcVersionCheck { if !config.SkipBcVersionCheck {
@ -151,6 +154,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
eth.blockchain.SetHead(compat.RewindTo) eth.blockchain.SetHead(compat.RewindTo)
core.WriteChainConfig(chainDb, genesisHash, chainConfig) core.WriteChainConfig(chainDb, genesisHash, chainConfig)
} }
eth.bloomIndexer.Start(eth.blockchain.CurrentHeader(), eth.blockchain.SubscribeChainEvent)
if config.TxPool.Journal != "" { if config.TxPool.Journal != "" {
config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal) config.TxPool.Journal = ctx.ResolvePath(config.TxPool.Journal)
@ -358,14 +362,17 @@ func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManage
func (s *Ethereum) Protocols() []p2p.Protocol { func (s *Ethereum) Protocols() []p2p.Protocol {
if s.lesServer == nil { if s.lesServer == nil {
return s.protocolManager.SubProtocols return s.protocolManager.SubProtocols
} else {
return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
} }
return append(s.protocolManager.SubProtocols, s.lesServer.Protocols()...)
} }
// Start implements node.Service, starting all internal goroutines needed by the // Start implements node.Service, starting all internal goroutines needed by the
// Ethereum protocol implementation. // Ethereum protocol implementation.
func (s *Ethereum) Start(srvr *p2p.Server) error { func (s *Ethereum) Start(srvr *p2p.Server) error {
// Start the bloom bits servicing goroutines
s.startBloomHandlers()
// Start the RPC service
s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion()) s.netRPCService = ethapi.NewPublicNetAPI(srvr, s.NetVersion())
// Figure out a max peers count based on the server limits // Figure out a max peers count based on the server limits
@ -376,6 +383,7 @@ func (s *Ethereum) Start(srvr *p2p.Server) error {
maxPeers = srvr.MaxPeers / 2 maxPeers = srvr.MaxPeers / 2
} }
} }
// Start the networking layer and the light server if requested
s.protocolManager.Start(maxPeers) s.protocolManager.Start(maxPeers)
if s.lesServer != nil { if s.lesServer != nil {
s.lesServer.Start(srvr) s.lesServer.Start(srvr)
@ -389,6 +397,7 @@ func (s *Ethereum) Stop() error {
if s.stopDbUpgrade != nil { if s.stopDbUpgrade != nil {
s.stopDbUpgrade() s.stopDbUpgrade()
} }
s.bloomIndexer.Close()
s.blockchain.Stop() s.blockchain.Stop()
s.protocolManager.Stop() s.protocolManager.Stop()
if s.lesServer != nil { if s.lesServer != nil {

@ -1,74 +0,0 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"math/big"
"testing"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
func TestMipmapUpgrade(t *testing.T) {
db, _ := ethdb.NewMemDatabase()
addr := common.BytesToAddress([]byte("jeff"))
genesis := new(core.Genesis).MustCommit(db)
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {
switch i {
case 1:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr}}
gen.AddUncheckedReceipt(receipt)
case 2:
receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{{Address: addr}}
gen.AddUncheckedReceipt(receipt)
}
})
for i, block := range chain {
core.WriteBlock(db, block)
if err := core.WriteCanonicalHash(db, block.Hash(), block.NumberU64()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := core.WriteHeadBlockHash(db, block.Hash()); err != nil {
t.Fatalf("failed to insert block number: %v", err)
}
if err := core.WriteBlockReceipts(db, block.Hash(), block.NumberU64(), receipts[i]); err != nil {
t.Fatal("error writing block receipts:", err)
}
}
err := addMipmapBloomBins(db)
if err != nil {
t.Fatal(err)
}
bloom := core.GetMipmapBloom(db, 1, core.MIPMapLevels[0])
if (bloom == types.Bloom{}) {
t.Error("got empty bloom filter")
}
data, _ := db.Get([]byte("setting-mipmap-version"))
if len(data) == 0 {
t.Error("setting-mipmap-version not written to database")
}
}

@ -0,0 +1,142 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package eth
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
const (
// bloomServiceThreads is the number of goroutines used globally by an Ethereum
// instance to service bloombits lookups for all running filters.
bloomServiceThreads = 16
// bloomFilterThreads is the number of goroutines used locally per filter to
// multiplex requests onto the global servicing goroutines.
bloomFilterThreads = 3
// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service
// in a single batch.
bloomRetrievalBatch = 16
// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests
// to accumulate request an entire batch (avoiding hysteresis).
bloomRetrievalWait = time.Duration(0)
)
// startBloomHandlers starts a batch of goroutines to accept bloom bit database
// retrievals from possibly a range of filters and serve the data to satisfy them.
func (eth *Ethereum) startBloomHandlers() {
for i := 0; i < bloomServiceThreads; i++ {
go func() {
for {
select {
case <-eth.shutdownChan:
return
case request := <-eth.bloomRequests:
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
head := core.GetCanonicalHash(eth.chainDb, (section+1)*params.BloomBitsBlocks-1)
blob, err := bitutil.DecompressBytes(core.GetBloomBits(eth.chainDb, task.Bit, section, head), int(params.BloomBitsBlocks)/8)
if err != nil {
panic(err)
}
task.Bitsets[i] = blob
}
request <- task
}
}
}()
}
}
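bloomRequests being a channel of channels gives a two-step handshake: a matcher session first hands the handler a private channel, then sends the task on it and waits for the same task to come back with Bitsets filled in. A simplified, self-contained sketch of that handshake; the retrieval struct is a local stand-in mirroring the fields used above, not the real core/bloombits type:

package main

import "fmt"

// retrieval mirrors the fields of the tasks handled above; it is illustrative only.
type retrieval struct {
	Bit      uint
	Sections []uint64
	Bitsets  [][]byte
}

func main() {
	requests := make(chan chan *retrieval)

	// Handler side: the shape of each goroutine started by startBloomHandlers.
	go func() {
		for request := range requests {
			task := <-request
			task.Bitsets = make([][]byte, len(task.Sections))
			for i := range task.Sections {
				task.Bitsets[i] = []byte{0xff} // stand-in for a decompressed bit vector
			}
			request <- task
		}
	}()

	// Requester side: roughly what a matcher session's multiplexing loop does per batch.
	request := make(chan *retrieval)
	requests <- request
	request <- &retrieval{Bit: 3, Sections: []uint64{0, 1}}
	done := <-request
	fmt.Println(len(done.Bitsets)) // 2
}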
const (
// bloomConfirms is the number of confirmation blocks before a bloom section is
// considered probably final and its rotated bits are calculated.
bloomConfirms = 256
// bloomThrottling is the time to wait between processing two consecutive index
// sections. It's useful during chain upgrades to prevent disk overload.
bloomThrottling = 100 * time.Millisecond
)
// BloomIndexer implements core.ChainIndexerBackend, building up a rotated bloom bits index
// for the Ethereum header bloom filters, permitting blazing fast filtering.
type BloomIndexer struct {
size uint64 // section size to generate bloombits for
db ethdb.Database // database instance to write index data and metadata into
gen *bloombits.Generator // generator to rotate the bloom bits creating the bloom index
section uint64 // Section is the section number being processed currently
head common.Hash // Head is the hash of the last header processed
}
// NewBloomIndexer returns a chain indexer that generates bloom bits data for the
// canonical chain for fast logs filtering.
func NewBloomIndexer(db ethdb.Database, size uint64) *core.ChainIndexer {
backend := &BloomIndexer{
db: db,
size: size,
}
table := ethdb.NewTable(db, string(core.BloomBitsIndexPrefix))
return core.NewChainIndexer(db, table, backend, size, bloomConfirms, bloomThrottling, "bloombits")
}
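ethdb.NewTable wraps the database so every key is transparently prefixed, here with "iB", keeping the indexer's progress metadata apart from regular chain data. A quick illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
)

func main() {
	db, _ := ethdb.NewMemDatabase()
	table := ethdb.NewTable(db, "iB") // same prefix as core.BloomBitsIndexPrefix

	table.Put([]byte("count"), []byte{42})
	raw, _ := db.Get([]byte("iBcount")) // the prefix is applied under the hood
	fmt.Println(raw)                    // [42]
}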
// Reset implements core.ChainIndexerBackend, starting a new bloombits index
// section.
func (b *BloomIndexer) Reset(section uint64) {
gen, err := bloombits.NewGenerator(uint(b.size))
if err != nil {
panic(err)
}
b.gen, b.section, b.head = gen, section, common.Hash{}
}
// Process implements core.ChainIndexerBackend, adding a new header's bloom into
// the index.
func (b *BloomIndexer) Process(header *types.Header) {
b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom)
b.head = header.Hash()
}
// Commit implements core.ChainIndexerBackend, finalizing the bloom section and
// writing it out into the database.
func (b *BloomIndexer) Commit() error {
batch := b.db.NewBatch()
for i := 0; i < types.BloomBitLength; i++ {
bits, err := b.gen.Bitset(uint(i))
if err != nil {
return err
}
core.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits))
}
return batch.Write()
}
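Reset, Process and Commit are invoked per section by core.ChainIndexer. The sketch below replays one section by hand with synthetic blooms to show what Commit ends up persisting, a compressed bit vector per bloom bit position; it relies only on the generator calls already used in this diff, and the section size is an assumption for illustration:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
	"github.com/ethereum/go-ethereum/core/bloombits"
	"github.com/ethereum/go-ethereum/core/types"
)

func main() {
	const sectionSize = 4096 // assumed; the indexer receives it via NewBloomIndexer

	gen, err := bloombits.NewGenerator(sectionSize)
	if err != nil {
		panic(err)
	}
	// What Process does once per header in the section, here with empty blooms.
	for i := uint(0); i < sectionSize; i++ {
		var bloom types.Bloom
		gen.AddBloom(i, bloom)
	}
	// What Commit does for each of the 2048 bit positions before writing it out.
	bits, err := gen.Bitset(0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(bits), len(bitutil.CompressBytes(bits))) // 512 raw bytes; the compressed form is tiny
}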

@ -19,7 +19,6 @@ package eth
import ( import (
"bytes" "bytes"
"fmt"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
@ -134,46 +133,3 @@ func upgradeDeduplicateData(db ethdb.Database) func() error {
return <-errc return <-errc
} }
} }
func addMipmapBloomBins(db ethdb.Database) (err error) {
const mipmapVersion uint = 2
// check if the version is set. We ignore data for now since there's
// only one version so we can easily ignore it for now
var data []byte
data, _ = db.Get([]byte("setting-mipmap-version"))
if len(data) > 0 {
var version uint
if err := rlp.DecodeBytes(data, &version); err == nil && version == mipmapVersion {
return nil
}
}
defer func() {
if err == nil {
var val []byte
val, err = rlp.EncodeToBytes(mipmapVersion)
if err == nil {
err = db.Put([]byte("setting-mipmap-version"), val)
}
return
}
}()
latestHash := core.GetHeadBlockHash(db)
latestBlock := core.GetBlock(db, latestHash, core.GetBlockNumber(db, latestHash))
if latestBlock == nil { // clean database
return
}
tstart := time.Now()
log.Warn("Upgrading db log bloom bins")
for i := uint64(0); i <= latestBlock.NumberU64(); i++ {
hash := core.GetCanonicalHash(db, i)
if (hash == common.Hash{}) {
return fmt.Errorf("chain db corrupted. Could not find block %d.", i)
}
core.WriteMipmapBloom(db, i, core.GetBlockReceipts(db, hash, i))
}
log.Info("Bloom-bin upgrade completed", "elapsed", common.PrettyDuration(time.Since(tstart)))
return nil
}

@ -52,8 +52,8 @@ type filter struct {
// information related to the Ethereum protocol such as blocks, transactions and logs. // information related to the Ethereum protocol such as blocks, transactions and logs.
type PublicFilterAPI struct { type PublicFilterAPI struct {
backend Backend backend Backend
useMipMap bool
mux *event.TypeMux mux *event.TypeMux
quit chan struct{}
chainDb ethdb.Database chainDb ethdb.Database
events *EventSystem events *EventSystem
filtersMu sync.Mutex filtersMu sync.Mutex
@ -63,14 +63,12 @@ type PublicFilterAPI struct {
// NewPublicFilterAPI returns a new PublicFilterAPI instance. // NewPublicFilterAPI returns a new PublicFilterAPI instance.
func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI { func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
api := &PublicFilterAPI{ api := &PublicFilterAPI{
backend: backend, backend: backend,
useMipMap: !lightMode, mux: backend.EventMux(),
mux: backend.EventMux(), chainDb: backend.ChainDb(),
chainDb: backend.ChainDb(), events: NewEventSystem(backend.EventMux(), backend, lightMode),
events: NewEventSystem(backend.EventMux(), backend, lightMode), filters: make(map[rpc.ID]*filter),
filters: make(map[rpc.ID]*filter),
} }
go api.timeoutLoop() go api.timeoutLoop()
return api return api
@ -325,20 +323,20 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {
// //
// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {
// Convert the RPC block numbers into internal representations
if crit.FromBlock == nil { if crit.FromBlock == nil {
crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64()) crit.FromBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
} }
if crit.ToBlock == nil { if crit.ToBlock == nil {
crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64()) crit.ToBlock = big.NewInt(rpc.LatestBlockNumber.Int64())
} }
// Create and run the filter to get all the logs
filter := New(api.backend, crit.FromBlock.Int64(), crit.ToBlock.Int64(), crit.Addresses, crit.Topics)
filter := New(api.backend, api.useMipMap) logs, err := filter.Logs(ctx)
filter.SetBeginBlock(crit.FromBlock.Int64()) if err != nil {
filter.SetEndBlock(crit.ToBlock.Int64()) return nil, err
filter.SetAddresses(crit.Addresses) }
filter.SetTopics(crit.Topics)
logs, err := filter.Find(ctx)
return returnLogs(logs), err return returnLogs(logs), err
} }
@ -372,21 +370,18 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
return nil, fmt.Errorf("filter not found") return nil, fmt.Errorf("filter not found")
} }
filter := New(api.backend, api.useMipMap) begin := rpc.LatestBlockNumber.Int64()
if f.crit.FromBlock != nil { if f.crit.FromBlock != nil {
filter.SetBeginBlock(f.crit.FromBlock.Int64()) begin = f.crit.FromBlock.Int64()
} else {
filter.SetBeginBlock(rpc.LatestBlockNumber.Int64())
} }
end := rpc.LatestBlockNumber.Int64()
if f.crit.ToBlock != nil { if f.crit.ToBlock != nil {
filter.SetEndBlock(f.crit.ToBlock.Int64()) end = f.crit.ToBlock.Int64()
} else {
filter.SetEndBlock(rpc.LatestBlockNumber.Int64())
} }
filter.SetAddresses(f.crit.Addresses) // Create and run the filter to get all the logs
filter.SetTopics(f.crit.Topics) filter := New(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)
logs, err := filter.Find(ctx) logs, err := filter.Logs(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -394,7 +389,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty
} }
// GetFilterChanges returns the logs for the filter with the given id since // GetFilterChanges returns the logs for the filter with the given id since
// last time is was called. This can be used for polling. // last time it was called. This can be used for polling.
// //
// For pending transaction and block filters the result is []common.Hash. // For pending transaction and block filters the result is []common.Hash.
// (pending)Log filters return []Log. // (pending)Log filters return []Log.

@ -0,0 +1,201 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package filters
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/bitutil"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/node"
)
func BenchmarkBloomBits512(b *testing.B) {
benchmarkBloomBits(b, 512)
}
func BenchmarkBloomBits1k(b *testing.B) {
benchmarkBloomBits(b, 1024)
}
func BenchmarkBloomBits2k(b *testing.B) {
benchmarkBloomBits(b, 2048)
}
func BenchmarkBloomBits4k(b *testing.B) {
benchmarkBloomBits(b, 4096)
}
func BenchmarkBloomBits8k(b *testing.B) {
benchmarkBloomBits(b, 8192)
}
func BenchmarkBloomBits16k(b *testing.B) {
benchmarkBloomBits(b, 16384)
}
func BenchmarkBloomBits32k(b *testing.B) {
benchmarkBloomBits(b, 32768)
}
const benchFilterCnt = 2000
func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
fmt.Println("Running bloombits benchmark section size:", sectionSize)
db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
head := core.GetHeadBlockHash(db)
if head == (common.Hash{}) {
b.Fatalf("chain data not found at %v", benchDataDir)
}
clearBloomBits(db)
fmt.Println("Generating bloombits data...")
headNum := core.GetBlockNumber(db, head)
if headNum < sectionSize+512 {
b.Fatalf("not enough blocks for running a benchmark")
}
start := time.Now()
cnt := (headNum - 512) / sectionSize
var dataSize, compSize uint64
for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {
bc, err := bloombits.NewGenerator(uint(sectionSize))
if err != nil {
b.Fatalf("failed to create generator: %v", err)
}
var header *types.Header
for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {
hash := core.GetCanonicalHash(db, i)
header = core.GetHeader(db, hash, i)
if header == nil {
b.Fatalf("Error creating bloomBits data")
}
bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)
}
sectionHead := core.GetCanonicalHash(db, (sectionIdx+1)*sectionSize-1)
for i := 0; i < types.BloomBitLength; i++ {
data, err := bc.Bitset(uint(i))
if err != nil {
b.Fatalf("failed to retrieve bitset: %v", err)
}
comp := bitutil.CompressBytes(data)
dataSize += uint64(len(data))
compSize += uint64(len(comp))
core.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)
}
//if sectionIdx%50 == 0 {
// fmt.Println(" section", sectionIdx, "/", cnt)
//}
}
d := time.Since(start)
fmt.Println("Finished generating bloombits data")
fmt.Println(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block")
fmt.Println(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize))
fmt.Println("Running filter benchmarks...")
start = time.Now()
mux := new(event.TypeMux)
var backend *testBackend
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
}
var addr common.Address
addr[0] = byte(i)
addr[1] = byte(i / 256)
filter := New(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)
if _, err := filter.Logs(context.Background()); err != nil {
b.Error("filter.Find error:", err)
}
}
d = time.Since(start)
fmt.Println("Finished running filter benchmarks")
fmt.Println(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks")
db.Close()
}
func forEachKey(db ethdb.Database, startPrefix, endPrefix []byte, fn func(key []byte)) {
it := db.(*ethdb.LDBDatabase).NewIterator()
it.Seek(startPrefix)
for it.Valid() {
key := it.Key()
cmpLen := len(key)
if len(endPrefix) < cmpLen {
cmpLen = len(endPrefix)
}
if bytes.Compare(key[:cmpLen], endPrefix) == 1 {
break
}
fn(common.CopyBytes(key))
it.Next()
}
it.Release()
}
var bloomBitsPrefix = []byte("bloomBits-")
func clearBloomBits(db ethdb.Database) {
fmt.Println("Clearing bloombits data...")
forEachKey(db, bloomBitsPrefix, bloomBitsPrefix, func(key []byte) {
db.Delete(key)
})
}
func BenchmarkNoBloomBits(b *testing.B) {
benchDataDir := node.DefaultDataDir() + "/geth/chaindata"
fmt.Println("Running benchmark without bloombits")
db, err := ethdb.NewLDBDatabase(benchDataDir, 128, 1024)
if err != nil {
b.Fatalf("error opening database at %v: %v", benchDataDir, err)
}
head := core.GetHeadBlockHash(db)
if head == (common.Hash{}) {
b.Fatalf("chain data not found at %v", benchDataDir)
}
headNum := core.GetBlockNumber(db, head)
clearBloomBits(db)
fmt.Println("Running filter benchmarks...")
start := time.Now()
mux := new(event.TypeMux)
backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
filter := New(backend, 0, int64(headNum), []common.Address{common.Address{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
fmt.Println("Finished running filter benchmarks")
fmt.Println(" ", d, "total ", d*time.Duration(1000000)/time.Duration(headNum+1), "per million blocks")
db.Close()
}
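These benchmarks skip the usual b.N loop and time fixed workloads against a previously synced chain database under the default data directory (node.DefaultDataDir() + "/geth/chaindata"); without such a database they bail out with "chain data not found". They can be run with the standard tooling, for example: go test -run NONE -bench BloomBits ./eth/filters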

@ -18,11 +18,12 @@ package filters
import ( import (
"context" "context"
"math"
"math/big" "math/big"
"time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
@ -34,167 +35,179 @@ type Backend interface {
EventMux() *event.TypeMux EventMux() *event.TypeMux
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error) GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
BloomStatus() (uint64, uint64)
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
} }
// Filter can be used to retrieve and filter logs. // Filter can be used to retrieve and filter logs.
type Filter struct { type Filter struct {
backend Backend backend Backend
useMipMap bool
db ethdb.Database db ethdb.Database
begin, end int64 begin, end int64
addresses []common.Address addresses []common.Address
topics [][]common.Hash topics [][]common.Hash
matcher *bloombits.Matcher
} }
// New creates a new filter which uses a bloom filter on blocks to figure out whether // New creates a new filter which uses a bloom filter on blocks to figure out whether
// a particular block is interesting or not. // a particular block is interesting or not.
// MipMaps allow past blocks to be searched much more efficiently, but are not available func New(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {
// to light clients. // Flatten the address and topic filter clauses into a single filter system
func New(backend Backend, useMipMap bool) *Filter { var filters [][][]byte
if len(addresses) > 0 {
filter := make([][]byte, len(addresses))
for i, address := range addresses {
filter[i] = address.Bytes()
}
filters = append(filters, filter)
}
for _, topicList := range topics {
filter := make([][]byte, len(topicList))
for i, topic := range topicList {
filter[i] = topic.Bytes()
}
filters = append(filters, filter)
}
// Assemble and return the filter
size, _ := backend.BloomStatus()
return &Filter{ return &Filter{
backend: backend, backend: backend,
useMipMap: useMipMap, begin: begin,
end: end,
addresses: addresses,
topics: topics,
db: backend.ChainDb(), db: backend.ChainDb(),
matcher: bloombits.NewMatcher(size, filters),
} }
} }
// SetBeginBlock sets the earliest block for filtering. // Logs searches the blockchain for matching log entries, returning all from the
// -1 = latest block (i.e., the current block) // first block that contains matches, updating the start of the filter accordingly.
// hash = particular hash from-to func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {
func (f *Filter) SetBeginBlock(begin int64) { // Figure out the limits of the filter range
f.begin = begin header, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
} if header == nil {
// SetEndBlock sets the latest block for filtering.
func (f *Filter) SetEndBlock(end int64) {
f.end = end
}
// SetAddresses matches only logs that are generated from addresses that are included
// in the given addresses.
func (f *Filter) SetAddresses(addr []common.Address) {
f.addresses = addr
}
// SetTopics matches only logs that have topics matching the given topics.
func (f *Filter) SetTopics(topics [][]common.Hash) {
f.topics = topics
}
// FindOnce searches the blockchain for matching log entries, returning
// all matching entries from the first block that contains matches,
// updating the start point of the filter accordingly. If no results are
// found, a nil slice is returned.
func (f *Filter) FindOnce(ctx context.Context) ([]*types.Log, error) {
head, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)
if head == nil {
return nil, nil return nil, nil
} }
headBlockNumber := head.Number.Uint64() head := header.Number.Uint64()
var beginBlockNo uint64 = uint64(f.begin)
if f.begin == -1 { if f.begin == -1 {
beginBlockNo = headBlockNumber f.begin = int64(head)
} }
var endBlockNo uint64 = uint64(f.end) end := uint64(f.end)
if f.end == -1 { if f.end == -1 {
endBlockNo = headBlockNumber end = head
}
// if no addresses are present we can't make use of fast search which
// uses the mipmap bloom filters to check for fast inclusion and uses
// higher range probability in order to ensure at least a false positive
if !f.useMipMap || len(f.addresses) == 0 {
logs, blockNumber, err := f.getLogs(ctx, beginBlockNo, endBlockNo)
f.begin = int64(blockNumber + 1)
return logs, err
} }
// Gather all indexed logs, and finish with non indexed ones
logs, blockNumber := f.mipFind(beginBlockNo, endBlockNo, 0) var (
f.begin = int64(blockNumber + 1) logs []*types.Log
return logs, nil err error
} )
size, sections := f.backend.BloomStatus()
// Run filters logs with the current parameters set if indexed := sections * size; indexed > uint64(f.begin) {
func (f *Filter) Find(ctx context.Context) (logs []*types.Log, err error) { if indexed > end {
for { logs, err = f.indexedLogs(ctx, end)
newLogs, err := f.FindOnce(ctx) } else {
if len(newLogs) == 0 || err != nil { logs, err = f.indexedLogs(ctx, indexed-1)
}
if err != nil {
return logs, err return logs, err
} }
logs = append(logs, newLogs...)
} }
rest, err := f.unindexedLogs(ctx, end)
logs = append(logs, rest...)
return logs, err
} }
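The new Logs implementation only consults the bloombits index for the fully indexed prefix of the chain and falls back to per-header bloom checks for the remaining tail. A standalone sketch of that boundary arithmetic (splitRange is an illustrative helper, not part of the package):

// splitRange mirrors the "indexed := sections * size" check in Logs above:
// given the filter range [begin, end], the section size and the number of
// completed sections from BloomStatus, it reports the last block that can be
// served from the index and whether the index is usable at all.
func splitRange(begin, end, size, sections uint64) (indexedEnd uint64, useIndex bool) {
	indexed := sections * size // first block number not covered by the index
	if indexed <= begin {
		return 0, false // nothing in the requested range is indexed yet
	}
	if indexed > end {
		return end, true // the whole range is covered by the index
	}
	return indexed - 1, true // index serves [begin, indexed-1], the rest is scanned unindexed
}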
func (f *Filter) mipFind(start, end uint64, depth int) (logs []*types.Log, blockNumber uint64) { // indexedLogs returns the logs matching the filter criteria based on the bloom
level := core.MIPMapLevels[depth] // bits indexed available locally or via the network.
// normalise numerator so we can work in level specific batches and func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
// work with the proper range checks // Create a matcher session and request servicing from the backend
for num := start / level * level; num <= end; num += level { matches := make(chan uint64, 64)
// find addresses in bloom filters
bloom := core.GetMipmapBloom(f.db, num, level) session, err := f.matcher.Start(uint64(f.begin), end, matches)
// Don't bother checking the first time through the loop - we're probably picking if err != nil {
// up where a previous run left off. return nil, err
first := true }
for _, addr := range f.addresses { defer session.Close(time.Second)
if first || bloom.TestBytes(addr[:]) {
first = false f.backend.ServiceFilter(ctx, session)
// range check normalised values and make sure that
// we're resolving the correct range instead of the // Iterate over the matches until exhausted or context closed
// normalised values. var logs []*types.Log
start := uint64(math.Max(float64(num), float64(start)))
end := uint64(math.Min(float64(num+level-1), float64(end))) for {
if depth+1 == len(core.MIPMapLevels) { select {
l, blockNumber, _ := f.getLogs(context.Background(), start, end) case number, ok := <-matches:
if len(l) > 0 { // Abort if all matches have been fulfilled
return l, blockNumber if !ok {
} f.begin = int64(end) + 1
} else { return logs, nil
l, blockNumber := f.mipFind(start, end, depth+1) }
if len(l) > 0 { // Retrieve the suggested block and pull any truly matching logs
return l, blockNumber header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))
} if header == nil || err != nil {
} return logs, err
}
found, err := f.checkMatches(ctx, header)
if err != nil {
return logs, err
} }
logs = append(logs, found...)
case <-ctx.Done():
return logs, ctx.Err()
} }
} }
return nil, end
} }
func (f *Filter) getLogs(ctx context.Context, start, end uint64) (logs []*types.Log, blockNumber uint64, err error) { // unindexedLogs returns the logs matching the filter criteria based on raw block
for i := start; i <= end; i++ { // iteration and bloom matching.
blockNumber := rpc.BlockNumber(i) func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {
header, err := f.backend.HeaderByNumber(ctx, blockNumber) var logs []*types.Log
for ; f.begin <= int64(end); f.begin++ {
header, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))
if header == nil || err != nil { if header == nil || err != nil {
return logs, end, err return logs, err
} }
if bloomFilter(header.Bloom, f.addresses, f.topics) {
// Use bloom filtering to see if this block is interesting given the found, err := f.checkMatches(ctx, header)
// current parameters
if f.bloomFilter(header.Bloom) {
// Get the logs of the block
receipts, err := f.backend.GetReceipts(ctx, header.Hash())
if err != nil { if err != nil {
return nil, end, err return logs, err
}
var unfiltered []*types.Log
for _, receipt := range receipts {
unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
}
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 {
return logs, uint64(blockNumber), nil
} }
logs = append(logs, found...)
} }
} }
return logs, nil
}
return logs, end, nil // checkMatches checks if the receipts belonging to the given header contain any log events that
// match the filter criteria. This function is called when the bloom filter signals a potential match.
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {
// Get the logs of the block
receipts, err := f.backend.GetReceipts(ctx, header.Hash())
if err != nil {
return nil, err
}
var unfiltered []*types.Log
for _, receipt := range receipts {
unfiltered = append(unfiltered, ([]*types.Log)(receipt.Logs)...)
}
logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)
if len(logs) > 0 {
return logs, nil
}
return nil, nil
} }
func includes(addresses []common.Address, a common.Address) bool { func includes(addresses []common.Address, a common.Address) bool {
@ -251,10 +264,6 @@ Logs:
return ret return ret
} }
func (f *Filter) bloomFilter(bloom types.Bloom) bool {
return bloomFilter(bloom, f.addresses, f.topics)
}
func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool { func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {
if len(addresses) > 0 { if len(addresses) > 0 {
var included bool var included bool

@ -20,12 +20,14 @@ import (
"context" "context"
"fmt" "fmt"
"math/big" "math/big"
"math/rand"
"reflect" "reflect"
"testing" "testing"
"time" "time"
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/event"
@ -36,6 +38,7 @@ import (
type testBackend struct { type testBackend struct {
mux *event.TypeMux mux *event.TypeMux
db ethdb.Database db ethdb.Database
sections uint64
txFeed *event.Feed txFeed *event.Feed
rmLogsFeed *event.Feed rmLogsFeed *event.Feed
logsFeed *event.Feed logsFeed *event.Feed
@ -84,6 +87,37 @@ func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subsc
return b.chainFeed.Subscribe(ch) return b.chainFeed.Subscribe(ch)
} }
func (b *testBackend) BloomStatus() (uint64, uint64) {
return params.BloomBitsBlocks, b.sections
}
func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
requests := make(chan chan *bloombits.Retrieval)
go session.Multiplex(16, 0, requests)
go func() {
for {
// Wait for a service request or a shutdown
select {
case <-ctx.Done():
return
case request := <-requests:
task := <-request
task.Bitsets = make([][]byte, len(task.Sections))
for i, section := range task.Sections {
if rand.Int()%4 != 0 { // Handle occasional missing deliveries
head := core.GetCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)
task.Bitsets[i] = core.GetBloomBits(b.db, task.Bit, section, head)
}
}
request <- task
}
}
}()
}
// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events. // TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.
// It creates multiple subscriptions: // It creates multiple subscriptions:
// - one at the start and should receive all posted chain events and a second (blockHashes) // - one at the start and should receive all posted chain events and a second (blockHashes)
@ -99,7 +133,7 @@ func TestBlockSubscription(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
genesis = new(core.Genesis).MustCommit(db) genesis = new(core.Genesis).MustCommit(db)
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {}) chain, _ = core.GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *core.BlockGen) {})
@ -156,7 +190,7 @@ func TestPendingTxFilter(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
transactions = []*types.Transaction{ transactions = []*types.Transaction{
@ -219,7 +253,7 @@ func TestLogFilterCreation(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
testCases = []struct { testCases = []struct {
@ -268,7 +302,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
) )
@ -298,7 +332,7 @@ func TestLogFilter(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
@ -415,7 +449,7 @@ func TestPendingLogsSubscription(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
api = NewPublicFilterAPI(backend, false) api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")

@ -41,8 +41,8 @@ func makeReceipt(addr common.Address) *types.Receipt {
return receipt return receipt
} }
func BenchmarkMipmaps(b *testing.B) { func BenchmarkFilters(b *testing.B) {
dir, err := ioutil.TempDir("", "mipmap") dir, err := ioutil.TempDir("", "filtertest")
if err != nil { if err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -55,7 +55,7 @@ func BenchmarkMipmaps(b *testing.B) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr1 = crypto.PubkeyToAddress(key1.PublicKey)
addr2 = common.BytesToAddress([]byte("jeff")) addr2 = common.BytesToAddress([]byte("jeff"))
@ -66,27 +66,21 @@ func BenchmarkMipmaps(b *testing.B) {
genesis := core.GenesisBlockForTesting(db, addr1, big.NewInt(1000000)) genesis := core.GenesisBlockForTesting(db, addr1, big.NewInt(1000000))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 100010, func(i int, gen *core.BlockGen) { chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 100010, func(i int, gen *core.BlockGen) {
var receipts types.Receipts
switch i { switch i {
case 2403: case 2403:
receipt := makeReceipt(addr1) receipt := makeReceipt(addr1)
receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
case 1034: case 1034:
receipt := makeReceipt(addr2) receipt := makeReceipt(addr2)
receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
case 34: case 34:
receipt := makeReceipt(addr3) receipt := makeReceipt(addr3)
receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
case 99999: case 99999:
receipt := makeReceipt(addr4) receipt := makeReceipt(addr4)
receipts = types.Receipts{receipt}
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
} }
core.WriteMipmapBloom(db, uint64(i+1), receipts)
}) })
for i, block := range chain { for i, block := range chain {
core.WriteBlock(db, block) core.WriteBlock(db, block)
@ -102,13 +96,10 @@ func BenchmarkMipmaps(b *testing.B) {
} }
b.ResetTimer() b.ResetTimer()
filter := New(backend, true) filter := New(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)
filter.SetAddresses([]common.Address{addr1, addr2, addr3, addr4})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
logs, _ := filter.Find(context.Background()) logs, _ := filter.Logs(context.Background())
if len(logs) != 4 { if len(logs) != 4 {
b.Fatal("expected 4 logs, got", len(logs)) b.Fatal("expected 4 logs, got", len(logs))
} }
@ -116,7 +107,7 @@ func BenchmarkMipmaps(b *testing.B) {
} }
func TestFilters(t *testing.T) { func TestFilters(t *testing.T) {
dir, err := ioutil.TempDir("", "mipmap") dir, err := ioutil.TempDir("", "filtertest")
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -129,7 +120,7 @@ func TestFilters(t *testing.T) {
rmLogsFeed = new(event.Feed) rmLogsFeed = new(event.Feed)
logsFeed = new(event.Feed) logsFeed = new(event.Feed)
chainFeed = new(event.Feed) chainFeed = new(event.Feed)
backend = &testBackend{mux, db, txFeed, rmLogsFeed, logsFeed, chainFeed} backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
addr = crypto.PubkeyToAddress(key1.PublicKey) addr = crypto.PubkeyToAddress(key1.PublicKey)
@ -142,7 +133,6 @@ func TestFilters(t *testing.T) {
genesis := core.GenesisBlockForTesting(db, addr, big.NewInt(1000000)) genesis := core.GenesisBlockForTesting(db, addr, big.NewInt(1000000))
chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 1000, func(i int, gen *core.BlockGen) { chain, receipts := core.GenerateChain(params.TestChainConfig, genesis, db, 1000, func(i int, gen *core.BlockGen) {
var receipts types.Receipts
switch i { switch i {
case 1: case 1:
receipt := types.NewReceipt(nil, false, new(big.Int)) receipt := types.NewReceipt(nil, false, new(big.Int))
@ -153,7 +143,6 @@ func TestFilters(t *testing.T) {
}, },
} }
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 2: case 2:
receipt := types.NewReceipt(nil, false, new(big.Int)) receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{ receipt.Logs = []*types.Log{
@ -163,7 +152,6 @@ func TestFilters(t *testing.T) {
}, },
} }
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 998: case 998:
receipt := types.NewReceipt(nil, false, new(big.Int)) receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{ receipt.Logs = []*types.Log{
@ -173,7 +161,6 @@ func TestFilters(t *testing.T) {
}, },
} }
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
case 999: case 999:
receipt := types.NewReceipt(nil, false, new(big.Int)) receipt := types.NewReceipt(nil, false, new(big.Int))
receipt.Logs = []*types.Log{ receipt.Logs = []*types.Log{
@ -183,12 +170,7 @@ func TestFilters(t *testing.T) {
}, },
} }
gen.AddUncheckedReceipt(receipt) gen.AddUncheckedReceipt(receipt)
receipts = types.Receipts{receipt}
} }
// i is used as block number for the writes but since the i
// starts at 0 and block 0 (genesis) is already present increment
// by one
core.WriteMipmapBloom(db, uint64(i+1), receipts)
}) })
for i, block := range chain { for i, block := range chain {
core.WriteBlock(db, block) core.WriteBlock(db, block)
@ -203,23 +185,15 @@ func TestFilters(t *testing.T) {
} }
} }
filter := New(backend, true) filter := New(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})
filter.SetAddresses([]common.Address{addr})
filter.SetTopics([][]common.Hash{{hash1, hash2, hash3, hash4}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
logs, _ := filter.Find(context.Background()) logs, _ := filter.Logs(context.Background())
if len(logs) != 4 { if len(logs) != 4 {
t.Error("expected 4 log, got", len(logs)) t.Error("expected 4 log, got", len(logs))
} }
filter = New(backend, true) filter = New(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})
filter.SetAddresses([]common.Address{addr}) logs, _ = filter.Logs(context.Background())
filter.SetTopics([][]common.Hash{{hash3}})
filter.SetBeginBlock(900)
filter.SetEndBlock(999)
logs, _ = filter.Find(context.Background())
if len(logs) != 1 { if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs)) t.Error("expected 1 log, got", len(logs))
} }
@ -227,12 +201,8 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
} }
filter = New(backend, true) filter = New(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})
filter.SetAddresses([]common.Address{addr}) logs, _ = filter.Logs(context.Background())
filter.SetTopics([][]common.Hash{{hash3}})
filter.SetBeginBlock(990)
filter.SetEndBlock(-1)
logs, _ = filter.Find(context.Background())
if len(logs) != 1 { if len(logs) != 1 {
t.Error("expected 1 log, got", len(logs)) t.Error("expected 1 log, got", len(logs))
} }
@ -240,44 +210,32 @@ func TestFilters(t *testing.T) {
t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0])
} }
filter = New(backend, true) filter = New(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})
filter.SetTopics([][]common.Hash{{hash1, hash2}})
filter.SetBeginBlock(1)
filter.SetEndBlock(10)
logs, _ = filter.Find(context.Background()) logs, _ = filter.Logs(context.Background())
if len(logs) != 2 { if len(logs) != 2 {
t.Error("expected 2 log, got", len(logs)) t.Error("expected 2 log, got", len(logs))
} }
failHash := common.BytesToHash([]byte("fail")) failHash := common.BytesToHash([]byte("fail"))
filter = New(backend, true) filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}})
filter.SetTopics([][]common.Hash{{failHash}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
logs, _ = filter.Find(context.Background()) logs, _ = filter.Logs(context.Background())
if len(logs) != 0 { if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs)) t.Error("expected 0 log, got", len(logs))
} }
failAddr := common.BytesToAddress([]byte("failmenow")) failAddr := common.BytesToAddress([]byte("failmenow"))
filter = New(backend, true) filter = New(backend, 0, -1, []common.Address{failAddr}, nil)
filter.SetAddresses([]common.Address{failAddr})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
logs, _ = filter.Find(context.Background()) logs, _ = filter.Logs(context.Background())
if len(logs) != 0 { if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs)) t.Error("expected 0 log, got", len(logs))
} }
filter = New(backend, true) filter = New(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})
filter.SetTopics([][]common.Hash{{failHash}, {hash1}})
filter.SetBeginBlock(0)
filter.SetEndBlock(-1)
logs, _ = filter.Find(context.Background()) logs, _ = filter.Logs(context.Background())
if len(logs) != 0 { if len(logs) != 0 {
t.Error("expected 0 log, got", len(logs)) t.Error("expected 0 log, got", len(logs))
} }

@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/bloombits"
"github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/core/vm"
@ -171,3 +172,10 @@ func (b *LesApiBackend) EventMux() *event.TypeMux {
func (b *LesApiBackend) AccountManager() *accounts.Manager { func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager return b.eth.accountManager
} }
func (b *LesApiBackend) BloomStatus() (uint64, uint64) {
return params.BloomBitsBlocks, 0
}
func (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {
}
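Because the LES backend reports zero completed sections and a no-op ServiceFilter, any filter created through a light client degrades to the per-header scan. An illustrative predicate (not part of the change) expressing that in terms of the filters.Backend interface:

// usesIndex: with the LES stub above, BloomStatus always returns sections = 0,
// so sections*size can never exceed the requested begin block and Filter.Logs
// takes the unindexedLogs path for the entire range.
func usesIndex(backend Backend, begin uint64) bool {
	size, sections := backend.BloomStatus()
	return sections*size > begin
}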

@ -324,8 +324,6 @@ func (self *worker) wait() {
if stat == core.CanonStatTy { if stat == core.CanonStatTy {
// This puts transactions in a extra db for rpc // This puts transactions in a extra db for rpc
core.WriteTxLookupEntries(self.chainDb, block) core.WriteTxLookupEntries(self.chainDb, block)
// Write map map bloom filters
core.WriteMipmapBloom(self.chainDb, block.NumberU64(), work.receipts)
// implicit by posting ChainHeadEvent // implicit by posting ChainHeadEvent
mustCommitNewWork = false mustCommitNewWork = false
} }

@ -0,0 +1,26 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
// These are network parameters that need to be constant between clients, but
// aren't necessarily consensus related.
const (
// BloomBitsBlocks is the number of blocks a single bloom bit section vector
// contains.
BloomBitsBlocks uint64 = 4096
)
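A section groups BloomBitsBlocks consecutive blocks into one bit vector, so mapping a block number to its section, and deciding whether that section is already indexed, is plain integer division. A small sketch under that assumption (the numbers are made up):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// Block 10,000,000 lives in section 10,000,000 / 4096 = 2441; a filter can
	// only serve it from the index once BloomStatus reports at least 2442
	// completed sections (so that sections*4096 exceeds the block number).
	block := uint64(10000000)
	section := block / params.BloomBitsBlocks
	fmt.Println("block", block, "-> section", section)
}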