mirror of https://github.com/ethereum/go-ethereum
parent f0e8a3e9c8
commit 26671e5488
@@ -0,0 +1,212 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

// chainView represents an immutable view of a chain with a block hash, a
// block id and a set of receipts associated with each block number. Block id
// can be any unique identifier of the blocks.
// Note that id and receipts are expected to be available up to headNumber
// while the canonical block hash is only expected up to headNumber-1, so that
// the interface can be implemented by the block builder while the processed
// head hash is not known yet.
type chainView interface {
	headNumber() uint64
	getBlockHash(number uint64) common.Hash
	getBlockId(number uint64) common.Hash
	getReceipts(number uint64) types.Receipts
}

// equalViews returns true if the two chain views are equivalent.
func equalViews(cv1, cv2 chainView) bool {
	if cv1 == nil || cv2 == nil {
		return false
	}
	head1, head2 := cv1.headNumber(), cv2.headNumber()
	return head1 == head2 && cv1.getBlockId(head1) == cv2.getBlockId(head2)
}

// matchViews returns true if the two chain views are equivalent up to the
// specified block number. If the specified number is higher than one of the
// heads then false is returned.
func matchViews(cv1, cv2 chainView, number uint64) bool {
	if cv1 == nil || cv2 == nil {
		return false
	}
	head1 := cv1.headNumber()
	if head1 < number {
		return false
	}
	head2 := cv2.headNumber()
	if head2 < number {
		return false
	}
	if number == head1 || number == head2 {
		return cv1.getBlockId(number) == cv2.getBlockId(number)
	}
	return cv1.getBlockHash(number) == cv2.getBlockHash(number)
}
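
// As an illustrative sketch of the semantics above (cv1 and cv2 are
// hypothetical views): assume both share canonical history up to block 99
// and diverge at block 100, which is the head of both. Then:
//
//	equalViews(cv1, cv2)      // false: the head block ids differ
//	matchViews(cv1, cv2, 99)  // true: canonical hashes match at 99
//	matchViews(cv1, cv2, 100) // false: block ids differ at the heads
//	matchViews(cv1, cv2, 101) // false: beyond both heads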

// blockchain defines functions required by the FilterMaps log indexer.
type blockchain interface {
	GetHeader(hash common.Hash, number uint64) *types.Header
	GetCanonicalHash(number uint64) common.Hash
	GetReceiptsByHash(hash common.Hash) types.Receipts
}

// StoredChainView implements chainView based on a given blockchain.
// Note that the view's head does not have to be the current canonical head
// of the underlying blockchain; it only needs to possess the block headers
// and receipts up to the expected chain view head.
// Also note that this implementation uses the canonical block hash as block
// id, which works as long as the log index structure is not hashed into the
// block headers. Starting from the fork that hashes the log index into the
// block, the id needs to be based on a set of fields that exactly defines the
// block but does not include the log index root itself.
type StoredChainView struct {
	chain  blockchain
	head   uint64
	hashes []common.Hash // block hashes starting backwards from headNumber until first canonical hash
}

// NewStoredChainView creates a new StoredChainView.
func NewStoredChainView(chain blockchain, number uint64, hash common.Hash) *StoredChainView {
	cv := &StoredChainView{
		chain:  chain,
		head:   number,
		hashes: []common.Hash{hash},
	}
	cv.extendNonCanonical()
	return cv
}

// headNumber implements chainView.
func (cv *StoredChainView) headNumber() uint64 {
	return cv.head
}

// getBlockHash implements chainView.
func (cv *StoredChainView) getBlockHash(number uint64) common.Hash {
	if number >= cv.head {
		panic("invalid block number")
	}
	return cv.blockHash(number)
}

// getBlockId implements chainView.
func (cv *StoredChainView) getBlockId(number uint64) common.Hash {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.blockHash(number)
}

// getReceipts implements chainView.
func (cv *StoredChainView) getReceipts(number uint64) types.Receipts {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.chain.GetReceiptsByHash(cv.blockHash(number))
}
// extendNonCanonical checks whether the previously known reverse list of head
// hashes still ends with one that is canonical on the underlying blockchain.
// If necessary, it traverses further back on the header chain and adds more
// hashes to the list.
func (cv *StoredChainView) extendNonCanonical() bool {
	for {
		hash, number := cv.hashes[len(cv.hashes)-1], cv.head-uint64(len(cv.hashes)-1)
		if cv.chain.GetCanonicalHash(number) == hash {
			return true
		}
		if number == 0 {
			log.Error("Unknown genesis block hash found")
			return false
		}
		header := cv.chain.GetHeader(hash, number)
		if header == nil {
			log.Error("Header not found", "number", number, "hash", hash)
			return false
		}
		cv.hashes = append(cv.hashes, header.ParentHash)
	}
}

// blockHash returns the given block hash without doing the head number check.
func (cv *StoredChainView) blockHash(number uint64) common.Hash {
	if number+uint64(len(cv.hashes)) <= cv.head {
		hash := cv.chain.GetCanonicalHash(number)
		if !cv.extendNonCanonical() {
			return common.Hash{}
		}
		if number+uint64(len(cv.hashes)) <= cv.head {
			return hash
		}
	}
	return cv.hashes[cv.head-number]
}
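
// Illustrative sketch of the hashes list layout, assuming head = 100 and a
// two-block reorg, i.e. hashes = [h100, h99, h98] where h98 is the most
// recent hash in the list that is still canonical:
//
//	cv.blockHash(100) // hashes[0] = h100, stored locally
//	cv.blockHash(98)  // hashes[2] = h98, the canonical boundary entry
//	cv.blockHash(97)  // older than the list, read via GetCanonicalHash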

// limitedChainView wraps a chainView and truncates it at a given head number.
type limitedChainView struct {
	parent chainView
	head   uint64
}

// newLimitedChainView returns a truncated view of the given parent.
func newLimitedChainView(parent chainView, headNumber uint64) chainView {
	if headNumber >= parent.headNumber() {
		return parent
	}
	return &limitedChainView{
		parent: parent,
		head:   headNumber,
	}
}
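
// Illustrative usage sketch (parent is a hypothetical view whose head
// number is 200):
//
//	v := newLimitedChainView(parent, 150)
//	v.headNumber()      // 150
//	v.getBlockHash(120) // delegates to parent.getBlockHash(120)
//
// Truncating at or above the parent's head simply returns the parent.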

// headNumber implements chainView.
func (cv *limitedChainView) headNumber() uint64 {
	return cv.head
}

// getBlockHash implements chainView.
func (cv *limitedChainView) getBlockHash(number uint64) common.Hash {
	if number >= cv.head {
		panic("invalid block number")
	}
	return cv.parent.getBlockHash(number)
}

// getBlockId implements chainView.
func (cv *limitedChainView) getBlockId(number uint64) common.Hash {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.parent.getBlockId(number)
}

// getReceipts implements chainView.
func (cv *limitedChainView) getReceipts(number uint64) types.Receipts {
	if number > cv.head {
		panic("invalid block number")
	}
	return cv.parent.getReceipts(number)
}

@@ -0,0 +1,293 @@
// Copyright 2025 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import "github.com/ethereum/go-ethereum/common"

// checkpointList lists checkpoints for finalized epochs of a given chain.
// This allows the indexer to start indexing from the latest available
// checkpoint and then index tail epochs in reverse order.
type checkpointList []epochCheckpoint

// epochCheckpoint specifies the last block of the epoch and the first log
// value index where that block starts. This allows a log value iterator to
// be initialized at the epoch boundary.
type epochCheckpoint struct {
	blockNumber  uint64 // block that generated the last log value of the given epoch
	blockId      common.Hash
	firstLvIndex uint64 // first log value index of the given block
}
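
// Illustrative reading of the first Mainnet entry below, assuming each epoch
// covers 2^26 = 67108864 log values (the firstLvIndex column steps by roughly
// this amount): block 4166218 generated the last log value of epoch 0, and
// that block's own log values start at index 67108680, just below the epoch
// boundary at 67108864. A log value iterator can thus be initialized right at
// the start of epoch 1.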

// checkpoints lists sets of checkpoints for multiple chains. The matching
// checkpoint set is autodetected by the indexer once the canonical chain is
// known.
var checkpoints = []checkpointList{
	// Mainnet
	{
		{4166218, common.HexToHash("0xdd767e0426256179125551e8e40f33565a96d1c94076c7746fa79d767ed4ad65"), 67108680},
		{4514014, common.HexToHash("0x33a0879bdabea4a7a3f2b424388cbcbf2fbd519bddadf13752a259049c78e95d"), 134217343},
		{4817415, common.HexToHash("0x4f0e8c7dd04fbe0985b9394575b19f13ea66a2a628fa5b08178ce4b138c6db80"), 201326352},
		{5087733, common.HexToHash("0xc84cd5e9cda999c919803c7a53a23bb77a18827fbde401d3463f1e9e52536424"), 268435343},
		{5306107, common.HexToHash("0x13f028b5fc055d23f55a92a2eeecfbcfbda8a08e4cd519ce451ba2e70428f5f9"), 335544094},
		{5509918, common.HexToHash("0x1ed770a58a7b4d4a828b7bb44c8820a674d562b23a6a0139981abe4c489d4dad"), 402652853},
		{5670390, common.HexToHash("0x3923ee6a62e6cc5132afdadf1851ae4e73148e6fbe0a8319cafd2a120c98efa3"), 469761897},
		{5826139, common.HexToHash("0xe61bc6ef03c333805f26319e1688f82553f98aa5e902b200e0621a3371b69050"), 536870853},
		{5953029, common.HexToHash("0x43d710b1b7243b848400975048ccefdfaba091c692c7f01c619d988886cc160f"), 603979580},
		{6102846, common.HexToHash("0xa100b2018f6545cc689656b4b846677b138955b7efd30e850cd14c246430ba18"), 671088291},
		{6276718, common.HexToHash("0xb832ac448b06c104ba50faefd58b0b94d53c0fba5cb268086adad4db99c2f35f"), 738197399},
		{6448696, common.HexToHash("0x48e8ae6f729ad6c76b6cf632bd52a6df7886ed55be09d43c5004fcc1463e533b"), 805305988},
		{6655974, common.HexToHash("0xac395971a6ffc30f807848f68b97b2834f8ea13478a7615860b6a69e3d0823ca"), 872415033},
		{6873949, common.HexToHash("0xc522ddb1113b1e9a87b2bdcb11ce78756beba6454a890122f121a032b5769354"), 939523784},
		{7080953, common.HexToHash("0x3606de577d80120d1edbb64bad7fa6795e788bae342866a98cc58ce2f7575045"), 1006632796},
		{7267002, common.HexToHash("0xad770882a69d216e955e34fef84851e56c0de82deacd6187a7a41f6170cd6c6d"), 1073741045},
		{7466708, common.HexToHash("0x17a48817b3a65aba333a5b56f3ff2e86fbcc19e184b046a5305a5182fdd8eb8a"), 1140850680},
		{7661807, common.HexToHash("0xa74731ee775fbd3f4d9313c68562737dd7c8d2c9eb968791d8abe167e16ddc96"), 1207959112},
		{7834556, common.HexToHash("0xe4b4812448075508cb05a0e3257f91b49509dc78cd963676a633864db6e78956"), 1275068095},
		{7990068, common.HexToHash("0x07bd4ca38abb4584a6209e04035646aa545ebbb6c948d438d4c25bfd9cb205fa"), 1342176620},
		{8143032, common.HexToHash("0x0e3149e9637290b044ee693b8fcb66e23d22db3ad0bdda32962138ba18e59f3f"), 1409285949},
		{8297660, common.HexToHash("0x34cd24f80247f7dfaf316b2e637f4b62f72ecc90703014fb25cb98ad044fc2c0"), 1476394911},
		{8465137, common.HexToHash("0x4452fa296498248d7f10c9dc6ec1e4ae7503aa07f491e6d38b21aea5d2c658a8"), 1543503744},
		{8655820, common.HexToHash("0x7bdb9008b30be420f7152cc294ac6e5328eed5b4abd954a34105de3da24f3cc6"), 1610612619},
		{8807187, common.HexToHash("0xde03e3bfddc722c019f0b59bc55efabcd5ab68c6711f4c08d0390a56f396590d"), 1677721589},
		{8911171, common.HexToHash("0xe44f342de74ab05a2a994f8841bdf88f720b9dc260177ba4030d0f7077901324"), 1744830310},
		{8960320, common.HexToHash("0x79764f9ff6e0fe4848eda1805687872021076e4e603112861af84181395ac559"), 1811938893},
		{9085994, common.HexToHash("0x24a101d1c8a63367a0953d10dc79c3b587a93bd7fd382084708adefce0b8363f"), 1879047965},
		{9230924, common.HexToHash("0xb176a98d3acd855cbb75265fb6be955a8d51abc771e021e13275d5b3ecb07eeb"), 1946156668},
		{9390535, common.HexToHash("0x640f5e2d511a5141878d57ae7a619f19b72a2bd3ef019cf0a22d74d93d9acf07"), 2013265733},
		{9515674, common.HexToHash("0xff4a7b6b21aeaeb6e1a75ecd22b1f34c058a0ce1477ce90a8ce78165fd1d0941"), 2080374553},
		{9659426, common.HexToHash("0xc351455249343b41e9171e183612b68c3c895271c62bd2c53d9e3ab1aa865aa1"), 2147483567},
		{9794018, common.HexToHash("0xde98035b4b7f9449c256239b65c7ff2c0330de44dee190106d0a96fb6f683238"), 2214592213},
		{9923840, common.HexToHash("0x881da313a1e2b6fab58a1d6fa65b5dacfdc9d68a3112a647104955b5233f84e3"), 2281701302},
		{10042435, common.HexToHash("0x451f6459640a6f54e2a535cc3a49cfc469861da3ddc101840ab3aef9e17fa424"), 2348810174},
		{10168883, common.HexToHash("0x5d16ff5adf0df1e4dc810da60af37399ef733be7870f21112b8c2cfff4995dd9"), 2415918783},
		{10289554, common.HexToHash("0x85d5690f15a787c43b9a49e8dd6e324f0b3e0c9796d07c0cfb128e5c168f5488"), 2483027930},
		{10386676, common.HexToHash("0x20f675ea72db448024a8a0b8e3ec180cac37a5910575bc32f8d9f5cdfe3c2649"), 2550136212},
		{10479675, common.HexToHash("0x014abb07acf2330cc78800ca1f564928f2daccca4b389bf5c59f4b840d843ec0"), 2617245218},
		{10562661, common.HexToHash("0xd437607a3f81ce8b7c605e167ce5e52bf8a3e02cdc646997bd0ccc57a50ad7d1"), 2684354520},
		{10641508, common.HexToHash("0x2e8ab6470c29f90ac23dcfc58310f0208f5d0e752a0c7982a77a223eca104082"), 2751462730},
		{10717156, common.HexToHash("0x8820447b6429dd12be603c1c130be532e9db065bb4bc6b2a9d4551794d63789a"), 2818571831},
		{10784549, common.HexToHash("0xc557daab80a7cdc963d62aa881faf3ab1baceff8e027046bcd203e432e0983b3"), 2885680800},
		{10848651, common.HexToHash("0xede1b0de5db6685a6f589096ceb8fccb08d3ff60e8b00a93caa4a775b48e07fc"), 2952789740},
		{10909166, common.HexToHash("0x989db675899d13323006a4d6174557e3c5501c672afd60d8bd902fc98d37e92e"), 3019897599},
		{10972902, common.HexToHash("0x5484050cc2c7d774bc5cd6af1c2ef8c19d1de12dabe25867c9b365924ea10434"), 3087007422},
		{11036597, common.HexToHash("0x1e3686e19056587c385262d5b0a07b3ec04e804c2d59e9aaca1e5876e78f69ae"), 3154116231},
		{11102520, common.HexToHash("0x339cf302fe813cce3bb9318b860dfa8be7f688413f38a6ea1987a1b84d742b4b"), 3221224863},
		{11168162, common.HexToHash("0xc0fa21ea090627610bcac4732dff702633f310cabafc42bc500d3d4805198fe0"), 3288334273},
		{11233707, common.HexToHash("0x491c37a479b8cf22eaa3654ae34c5ddc4627df8c58ca8a6979159e1710428576"), 3355442691},
		{11300526, common.HexToHash("0xb7366d2a24df99002cffe0c9a00959c93ef0dcfc3fd17389e2020bf5caa788eb"), 3422551480},
		{11367621, common.HexToHash("0xce53df5080c5b5238bb7717dfbfd88c2f574cfbb3d91f92b57171a00e9776cd2"), 3489660710},
		{11431881, common.HexToHash("0x2a08ff9c4f6fd152166213d902f0870822429f01d5f90e384ac54a3eac0ceb3a"), 3556768626},
		{11497107, common.HexToHash("0x1f99c6b65f2b1cb06ed1786c6a0274ff1b9eacab6cb729fcd386f10ebbd88123"), 3623878389},
		{11560104, common.HexToHash("0xebe6924817bbdfe52af49667da1376bae5a2994b375d4b996e8ff2683744e37a"), 3690986640},
		{11625129, common.HexToHash("0xbe6eee325329ee2fe632d8576864c29dd1c79bab891dc0a22d5b2ac87618d26e"), 3758095773},
		{11690397, common.HexToHash("0xc28bf55f858ddf5b82d1ceb3b5258b90a9ca34df8863a1c652c4d359f5748fdf"), 3825204492},
		{11755087, common.HexToHash("0x0c10cde6ce1bbe24dc57347fe4aaebc17b7d8e8d7d97e3db573133477f494740"), 3892314051},
		{11819674, common.HexToHash("0x36b694a1776c94e4c6ae4a410931b2086de47a83e437517040e3290ce9afff67"), 3959422445},
		{11883358, common.HexToHash("0x21f447aca9ddf94ed71df9fa3648a12acc2ba603f89f24c4784936864c41945f"), 4026531743},
		{11948524, common.HexToHash("0x71a52b6cce80d3a552b0daa18beb952facf81a89bc7ca769d08ac297f317507a"), 4093640009},
		{12013168, common.HexToHash("0x9a7fb369b8d8cd0edd0d890d636096f20c63abb7eb5798ad1e578cac599e3db8"), 4160748475},
		{12078711, common.HexToHash("0x5de09329413b0c2f58d926f225197552a335ba3d5544d7bdb45e7574f78c9b8d"), 4227858275},
		{12143640, common.HexToHash("0xbeafc0e1e0586f5a95f00f2a796d7df122c79c187aa2d917129297f24b8306bd"), 4294967145},
		{12208005, common.HexToHash("0x052487095cdd4a604808e6c14e30fb68b3fa546d35585b315f287219d38ef77c"), 4362075289},
		{12272465, common.HexToHash("0x82c8a50413bd67a0d6f53b085adcd9ae8c25ecc07ed766fa80297a8dcae63b29"), 4429184610},
		{12329418, common.HexToHash("0x294c147e48d32c217ff3f27a3c8c989f15eee57a911408ec4c28d4f13a36bb3b"), 4496292968},
		{12382388, common.HexToHash("0x8c2555965ff735690d2d94ececc48df4700e079c7b21b8e601a30d4e99bc4b5b"), 4563401809},
		{12437052, common.HexToHash("0x2e38362031f36a0f3394da619dcc03be03c19700594cbd1df84c2c476a87de63"), 4630511012},
		{12490026, common.HexToHash("0x122749c02a55c9c2a1e69068f54b6c1d25419eb743e3553aba91acf1daeadc35"), 4697619920},
		{12541747, common.HexToHash("0xfb9f12aa2902da798ac05fab425434f8c7ce98050d67d416dbb32f98c21f66f7"), 4764728267},
		{12597413, common.HexToHash("0x9a7a399c2904ac8d0fec580550525e7e1a73d8f65f739bf7c05d86e389d0d3f7"), 4831837757},
		{12651950, common.HexToHash("0xb78dcb572cdafb9c4e2f3863ef518a3b2df0cd4f76faa26a423b2ca0c1cde734"), 4898946491},
		{12706472, common.HexToHash("0xfd21f41ec6b0c39287d7d48c134d1212a261c53d65db99739994b003150bbad1"), 4966054796},
		{12762929, common.HexToHash("0xc94d994bc40b2ae7dc23cf2b92cc01e84915f090bb57c0d9a67584bd564d3916"), 5033164307},
		{12816689, common.HexToHash("0x7770c72f22cbf6ccf7ab85d203088f7ede89632cf0042c690102f926a90bd09d"), 5100273412},
		{12872408, common.HexToHash("0x2e008b8c952d828875d777f7912f472af96ffc977f2ceae884006682cab6b8ed"), 5167381625},
		{12929718, common.HexToHash("0x85eb0ed3c5910c6a01b65ef0a5b76c59c2cdb5094e6e27eb87c751d77bcc2c88"), 5234491305},
		{12988757, common.HexToHash("0xdf12045bea73af18d4e71f8be8e334160f78b85f96a3535a4056409d8b61355a"), 5301600237},
		{13049172, common.HexToHash("0xf07608d97a101cd9a95fee9d9062a15bcb333263e555f8cfa31da037e0468f30"), 5368709080},
		{13108936, common.HexToHash("0x42739341db582d2f39b91ec9e8cc758777ca3f6ff9f25cd98883619fd5f026a7"), 5435817013},
		{13175495, common.HexToHash("0x564f25eacb229350b7c648b5828169e7a0344ae62e866206828e2cfad8947f10"), 5502926476},
		{13237721, common.HexToHash("0x0973425abec0fa6319701b46e07c2373b0580e3adbed6900aad27d5bf26dcb95"), 5570035419},
		{13298771, common.HexToHash("0xf3a16fec5be808c9f7782fb578dc8cef7f8e2110f7289bd03c0cc13977dd1518"), 5637143840},
		{13361281, common.HexToHash("0x3c0b6364201ca9221b61af3de27a3a87e111870b8c7efc43a6d8496e98c68690"), 5704253046},
		{13421819, common.HexToHash("0x2f472e57997b95558b99e3e5e7e0e8d4dbf8b71c081aac6536c9ff5925dac2ce"), 5771361231},
		{13480620, common.HexToHash("0xc4d689e87464a0c83c661c8e3a0614c370631de857f7e385b161dfe8bacd3e71"), 5838469468},
		{13535793, common.HexToHash("0xe7674bacc8edce9fb3efd59b92c97da48fe7ace1de314b4a67d7d032fc3bb680"), 5905578026},
		{13590588, common.HexToHash("0x6a3e86bdce7dd7d8792e1af9156edd8c3ffee7c20fed97001f58a9a2699f6594"), 5972687757},
		{13646707, common.HexToHash("0xab404a5d3709cf571b04e9493f37116eeb5dd2bc9dc10c48387c1e0199013d69"), 6039797165},
		{13703025, common.HexToHash("0x20e2fde15b8fe56f5dd7ab0f324c552038167ed44864bf3978e531ae68d6d138"), 6106905803},
		{13761024, common.HexToHash("0x2ae49275e13e780f1d29aea8507b2a708ff7bfe977efac93e050273b8b3a8164"), 6174015107},
		{13819468, common.HexToHash("0xb9d19cb31dedb1128b11cad9ffd6e58c70fe7ba65ba68f1ac63668ac5160ad85"), 6241124350},
		{13877932, common.HexToHash("0x80b1ff0bb069a8479360a15eaa84ba30da02cfacadc564837f4b1c90478addb8"), 6308232256},
		{13935384, common.HexToHash("0xe1f5469a559a6114dd469af61b118b9d9551a69bbd49a4e88f2a2d724830c871"), 6375341632},
		{13994042, common.HexToHash("0x25188fb75f2328c870ade7c38ef42ff5fddef9c4e364eebe4c5d8d9cc3ecabab"), 6442449799},
		{14051123, common.HexToHash("0xf4ef2bce9ee9222bdcf6b3a0c204676d9345e211e10c983e523930274e041ef1"), 6509559107},
		{14109189, common.HexToHash("0x80b730c28f75d8cb5ec2fb736341cd87cb4ecb2c9c614e0a4ecc0f9812675d50"), 6576667347},
		{14166822, common.HexToHash("0xf662a24b91684fa8ac462b31071f406de8d6183dba46d30d690f4407bc6af36f"), 6643777079},
		{14222488, common.HexToHash("0x7333e324c96b12f11a38d1fc2ddb4860e018b90f5dc10f3dbe19f7679bb95535"), 6710885890},
		{14277180, common.HexToHash("0x4373c1000e8e10179657689e2f0e42f88bd1601ecb4a5d83970d10287f6654cc"), 6777994595},
		{14331080, common.HexToHash("0x9c708a750a3f284ec0ee950110b36fd488cb1ec24cd0c2ea72c19551ec5c42a5"), 6845103719},
		{14384243, common.HexToHash("0x34ce7503b76335aa18dec880b0cefd388a29e0fcff6f2e1ddda8fb8c0ac1daf0"), 6912212376},
		{14437670, common.HexToHash("0x79842efd3e406b41f51935fe2e6ad20a7dd5a9db2280ebd7f602ed93da1e3c24"), 6979320543},
		{14489204, common.HexToHash("0xcd12addf0afdc229e9fe3bd0a34677a3826c5e78d4baf715f8ed36b736d6627a"), 7046430591},
		{14541688, common.HexToHash("0x55f617abf208a73fc467e8cb5feead586b671dbb0f6281570b3c44b8eabb2b9e"), 7113538755},
		{14594551, common.HexToHash("0xc7211bf772e93c8c2f945fcb6098b47c3455604cb8b94a505cb5cb720914c369"), 7180646025},
		{14645065, common.HexToHash("0x6d5b0326f4b22e2b0196986a514f23ec6e9a62f70f53300a22b21ff661a6ef7e"), 7247756883},
		{14695926, common.HexToHash("0x0a77272250e43b4bb46c02eb76944881a3c6b00a21bb9086a8229199bd62d97a"), 7314865843},
		{14746330, common.HexToHash("0xd677fdbaf8efb1bfdc138ac6b2bd5d0e890a29acb1f52f40169181ad517b0d31"), 7381974956},
		{14798546, common.HexToHash("0xbb277e8623acd2ce2340cf32f6c0ddab70fd95d862287f68a3c37250a70619cd"), 7449082890},
		{14848230, common.HexToHash("0x587b39f11bdaa2091291c7c3947e88df2e91e7997f2375dfd43b6e310a538582"), 7516192636},
		{14897646, common.HexToHash("0xf5b5c9d0c024ca0c0f0c6171871f609687f4ccb064ededbd61176cf23a9011e8"), 7583299602},
		{14950782, common.HexToHash("0x50549486afaf92a4c3520012b325e914ef77a82e4d6530a71f9b1cca31bfae18"), 7650409868},
		{15004101, common.HexToHash("0x7edac55dea3ee4308db60b9bc0524836226fe301e085b3ce39105bd145ba7fc3"), 7717517503},
		{15056903, common.HexToHash("0xb4cfd02d435718598179cdba3f5c11eb8653fe97ec8d89c60673e3e07b8dfc94"), 7784627997},
		{15108302, common.HexToHash("0x53c77a7de4515e9e93467a76f04cc401834bcdd64e9dfa03cf6d2844a6930293"), 7851736988},
		{15159526, common.HexToHash("0x1a31ad84b423254d7ff24e7eca54048ed8cc13cec5eb7289bf3f98ed4de9f724"), 7918844431},
		{15211013, common.HexToHash("0xe5d491e1d6cc5322454143b915c106be1bf28114a41b054ba5e5cfe0abecafba"), 7985953942},
		{15264389, common.HexToHash("0xd9939bb9e58e95d2672c1148b4ec5730204527d3f3fc98ca03a67dc85cf3d710"), 8053063187},
		{15315862, common.HexToHash("0x7254f99c4bb05235d5b437984c9132164e33182d4ce11a3847999da5c28b4092"), 8120172147},
		{15364726, common.HexToHash("0x11b57547579d9009679e327f57e308fe86856391805bc3c86e7b39daae890f52"), 8187281042},
		{15412886, common.HexToHash("0xbe3602b1dbef9015a3ec7968ac7652edf4424934b6bf7b713b99d8556f1d9444"), 8254390023},
		{15462792, common.HexToHash("0x3348ca4e14ac8d3c6ac6df676deaf3e3b5e0a11b599f73bd9739b74ebd693efe"), 8321499024},
		{15509914, common.HexToHash("0xbc98fd6b71438d5a169f9373172fea799fa3d22a8e6fe648d35e1070f2261113"), 8388606521},
		{15558748, common.HexToHash("0x5fa2cf499276ae74a5b8618990e71ed11a063619afe25c01b46e6252eba14c19"), 8455716577},
		{15604217, common.HexToHash("0x78a608e13d2eb3c5fed81a19b829ede88071cf01ea9ff58112a7472435f97c30"), 8522825668},
		{15651869, common.HexToHash("0xd465d861d925d1475440782ff16c2b3361ba3c8e169d7cc90eb8dfc0f31b0aac"), 8589934080},
		{15700968, common.HexToHash("0x71e3def131271e02c06ca945d14a995703a48faac1334a9e2e2321edd0b504d0"), 8657043390},
		{15762986, common.HexToHash("0x9b1b51dca2eae29162ca66968a77b45175f134b44aea3defadcb924f83e0b944"), 8724151376},
		{15814455, common.HexToHash("0x3c04a509cb6304d3df4bef57e0119d9e615ab737ec0b4a7deada6e5f57d9f873"), 8791260562},
		{15865639, common.HexToHash("0x9e9e26148c774518ecf362c0e7c65a5c1b054a8a3e4e36036c70e273fac6147c"), 8858368894},
		{15920564, common.HexToHash("0x9efe1d4dbfd9aa891ac0cffd3e1422a27ba2ea4add211b6900a2242cdb0f0ca0"), 8925477950},
		{15974371, common.HexToHash("0xc63ccef7bc35a0b431a411f99fe581b322d00cfc6422d078696808a5658a32ac"), 8992587107},
		{16032913, common.HexToHash("0x3e60957224964669a8646914e3166553b9f4256d5be160b17995d838af3ef137"), 9059696632},
		{16091057, common.HexToHash("0x12b346047bb49063ab6d9e737775924cf05c52114202ddb1a2bdaf9caabbfe0c"), 9126804912},
		{16150977, common.HexToHash("0x49318a32ff0ce979c4061c1c34db2a94fb06e7669c93742b75aff14a134fa598"), 9193913896},
		{16207432, common.HexToHash("0xf7870865edf81be4389a0be01468da959de703df0d431610814d16ed480176e4"), 9261019778},
		{16262582, common.HexToHash("0x25818e0f4d54af6c44ef7b23add34409a47de3ab1c905889478f3ec8ad173ec3"), 9328131320},
		{16319695, common.HexToHash("0x25de4b1c18cc503f5d12b4fa9072d33a11fa503a3dbeb9ab3d016b57c1e5cd4d"), 9395240790},
		{16373605, common.HexToHash("0x3794a5e0d2aa10baf1e6a5ec623d6089fdd39799eff633017d8df5144526939f"), 9462349509},
		{16423494, common.HexToHash("0xe0217d947ba3865dfc9288e0c890b0996457bb9d18467bd125e86bbb0052b57f"), 9529458033},
		{16474853, common.HexToHash("0xd454f033d190f22f9e56f0209ea1eeb3b6257805d5d88650d2759eb4d24821b7"), 9596567055},
		{16525689, common.HexToHash("0x8a23cbbf3e258e13f5a1ada434366796cb4a3e5b1062455582fb2bc3ab991541"), 9663674943},
		{16574203, common.HexToHash("0xc1a5b7d26e8222bd2d56ef4108f75d69f7c116707d348950834e00962241a4f8"), 9730785112},
		{16622622, common.HexToHash("0x3ddb3ef7a4309bd788258fb0d62613c89a0b4de715f4e12f6017a194d19d6481"), 9797893665},
		{16672585, common.HexToHash("0x8aa5e9f72b261f9e2a9eb768483d1bbd84d3a88fdb1346f6a9a7f262fd28ba41"), 9865002893},
		{16720124, common.HexToHash("0x2128f8baf264166e37554d5c31a06de58d9ccfb663117358251da548a23a060f"), 9932111275},
		{16769162, common.HexToHash("0x6b3e849482d3222032740ad6b8f98e24636c82682a6a3572b1ef76dfebc66821"), 9999217824},
		{16818311, common.HexToHash("0xe45f57381978a2bfc85bd20af1c41e2b630412642ac4f606b477f05f030ef5d9"), 10066328668},
		{16869531, common.HexToHash("0xa154555266d24dc1f4885af5fafcf8cab3de788998cf69e1d28f56aa13a40c43"), 10133437302},
		{16921611, common.HexToHash("0xf1f829b4ab5eec6e243916dd530993fa11eef5510fd730e8d09ead6b380355a1"), 10200547185},
		{16974870, common.HexToHash("0x1a33202b95926ae4cb8e6e99d8d150f3c50d817b3a316452bdf428c971dabde5"), 10267655914},
		{17031277, common.HexToHash("0x706c9dd0dc81e7ac29d2ea0f826e6b8a1dcb5adb1b904ff6e43260729c9fd0a7"), 10334764934},
		{17086330, common.HexToHash("0x085a80cafe96b520105b9a1f8e7a2bbc9474da24da7e6344ca7c4d32db822f92"), 10401871892},
		{17141311, common.HexToHash("0x33ec6513dfa515bc5f6356476b4eb075a8064181d6aaf6aa1a1e18887e342f74"), 10468982364},
		{17190907, common.HexToHash("0x6f41273d3bf30d3347e7eb68872a49b3ac947f314543478be7a28a55e5c41a3c"), 10536090817},
		{17237199, common.HexToHash("0x9a87a14a128c0345a366940f821a14f16719de628658ac0628e410a72d723e90"), 10603200178},
		{17287181, common.HexToHash("0x9c6e78adcf562ac63c103e3e5a02f025023079aca79bdd6ef18f7bd2a6271c29"), 10670309183},
		{17338652, common.HexToHash("0x1b747da97b2397a293602af57514dab4ca1010bb6c601ff05cb2012dd1124ebb"), 10737418023},
		{17389337, common.HexToHash("0xbc3c0ca1e5989605b9b59c94b418562eb17ccbce30e45ac8531cf0b3867a6b2c"), 10804522857},
		{17442261, common.HexToHash("0x1ec341be1cbd09f559bfa3d3e39a341d8e21052eeb7880931d43d086651733b7"), 10871635535},
		{17497787, common.HexToHash("0x6069880d486f2548599df1e14e12752d3eb9bc99843a98cd6631c22be1b58554"), 10938744657},
		{17554322, common.HexToHash("0x69b2564bc00b1f310f6b416912869d7530d7864bf7d70d55c7ace554f129b989"), 11005852829},
		{17608492, common.HexToHash("0x7d590653d5fa52c0d3ee453a77d2088504f57adcef35cd57c567afb554608457"), 11072961972},
		{17664272, common.HexToHash("0xdc16159d3500cdc7410873102f41fc55de2a8a41e3779c4b70e6224a541e2b9e"), 11140070967},
		{17715101, common.HexToHash("0x655e33c4e81182464ea0b0e1fdbc53ce53902431db5107326b816091a4564652"), 11207179487},
		{17764042, common.HexToHash("0x54439184f31cd83ba06b48b6dbfdd744ae7246355be1327b44744058711d05c0"), 11274287303},
		{17814383, common.HexToHash("0xfb453bc951360c76fb09bb1b9a3e39d23ececa0adb93368cc3f41f0457845089"), 11341397984},
		{17864648, common.HexToHash("0x32a68823ef4ec0cbab2fe50c97e3f462b575e8b117da40d00c710b4c66ee1d6d"), 11408505657},
		{17913366, common.HexToHash("0x04b944aab8a4ff91b77c2191817cf051766100c227616a3746af53407e740124"), 11475614351},
		{17961690, common.HexToHash("0x08bee7cc0b764106ca01dd5370b617879487ffb423688c96e948dce125990f45"), 11542723488},
		{18011048, common.HexToHash("0x94c39d3a64f3e9a91b1d98554cd29e1390e30fa61cfa4e909c503eee2fd9f165"), 11609833142},
		{18061209, common.HexToHash("0x2ee9ade68955c030488c8a30537bdf948355f7dd5ae64942b5bfce1be6650e19"), 11676941316},
		{18111692, common.HexToHash("0xd6c4fd0c1cc20ed5e7960bb5043e9e5e9c66a4d2ec5709ac9797fff678435640"), 11744050346},
		{18166212, common.HexToHash("0x3262588c2ef79a3b3f6a3db6435202d22f5667cd48c136b0797404901525c9ff"), 11811159686},
		{18218743, common.HexToHash("0x935bd9a4164ff7ecd09a37b916ce5bf78487bd19377b5b17be153e39318aee74"), 11878268593},
		{18271236, common.HexToHash("0xe58ebb821f27e3665898f390802a3d129d217b3a3ee36d890a85cf22a0a8aa33"), 11945376750},
		{18323007, common.HexToHash("0x3997a841468efa1bc614bfc3de4502274901b04b428f87a1f3086dfd78cda1eb"), 12012485748},
		{18372443, common.HexToHash("0xc44a13a5d02e8dc39f355de5e21ce7bb311ce7f4d9114ff480dce235a169e416"), 12079595370},
		{18421829, common.HexToHash("0x7da63a0b613d8745597b2ac64fd5cc8b2fb14b24d163b12a0a39d7d3d4ff7b5c"), 12146703582},
		{18471706, common.HexToHash("0xd632a1893f415ff618f4b612a7687e6af1f12feeed81f46f0022090829c1eb4c"), 12213812677},
		{18522301, common.HexToHash("0x44fa2cf08145ae40e8e42f4e6b4ab7df360a17c5a065ce45fcc41b51bee011f4"), 12280921639},
		{18572935, common.HexToHash("0x72b8ab4c78c90425ee054b4806a8be703da0febdf1d51866358ec2bd21ba9529"), 12348029751},
		{18623431, common.HexToHash("0x8c4cb2f13501d9788820280c6f16692d0737258c3896f1e4bded32d838febf7f"), 12415138965},
		{18675470, common.HexToHash("0x523b73c19ea8b3ae32ef141a83ef9855e667ebf51443cfcabd1a06659359062a"), 12482247454},
		{18725728, common.HexToHash("0x0cfbd131eb5dad51488238079fba29a63eebb5c32d1a543cb072e48dc2104ef3"), 12549356369},
		{18778387, common.HexToHash("0xc4906c77af8058b9f172a4f0e8788c7887f05caa5ac752b38b5387080f74ae49"), 12616465992},
		{18835044, common.HexToHash("0x49c5e07f409a841dc81f3ef8417f1951f8fcc13c90134f9d2a0cd11938f9fa36"), 12683575082},
		{18883308, common.HexToHash("0x386a58dd5f79a419eeb05075b07b3ff3bc836a265c9688854a504223b1d6a830"), 12750683753},
		{18933635, common.HexToHash("0xd3881292147589bd2e192769e5c9175b5d03a453fe1ef3c4b5b6858ac9402a2f"), 12817792470},
		{18988254, common.HexToHash("0xcbe72dfa15428ac21b9c59c703ceaa0eb4b2205927687261d7aaed3dbb3783ea"), 12884882858},
		{19041325, common.HexToHash("0x92b077e1c2f8819da728f0307c914fdcd57eba14ea07d9a45c28d1ed8ffff576"), 12952010530},
		{19089163, common.HexToHash("0x43f8ab2d3dfc34c8e18cba903074d54e235dc546f19c4eb78245a522c266c84e"), 13019119228},
		{19140629, common.HexToHash("0xab7b7ae5424b18105a13b657fa6099d4ab67fde5baff39fe6e4de707397e995c"), 13086228236},
		{19192118, common.HexToHash("0x451327e6a5cf6ce1c8c14c01687dc5f719f3c2176f46bac4f264616256e30d1c"), 13153337116},
		{19237836, common.HexToHash("0x9b260d6be369557d1dc88aca423e2697e697d941d1b726c183015b5649e248c8"), 13220445421},
		{19291271, common.HexToHash("0x4878c28d79e1f71bc11e062eb61cb52ae6a18b670b0f9bea38b477944615078e"), 13287554254},
		{19344448, common.HexToHash("0x56243b9ad863bf90953fe9aa6e64a426629384db1190e70dce79575d30595f7e"), 13354663659},
		{19394948, common.HexToHash("0x195173b64dda7908d6aa39a63c8bdd29ec181d401e369d513be1308550d0ddcb"), 13421771935},
		{19443075, common.HexToHash("0xd39c1d60996475e65d1ab5b4e755f510ca466564a8155d35db6667988d6c0e44"), 13488880427},
		{19488383, common.HexToHash("0x28956eb8856fa8db59c02585016b8baf43bc44bc35b00bdaf8a6babe51101c5c"), 13555977105},
		{19534584, common.HexToHash("0x2421c97b0f140185d4c20943cd4ed7d7424468482feb76e3003a1cc69da3fd7b"), 13623097580},
		{19579602, common.HexToHash("0x25f96529028e9f51c59aec9ce8de282b7dd67066fd46a1694130698ed0f40d8b"), 13690207623},
		{19621517, common.HexToHash("0x4f6f6e0a0488f3d51823bc4b07c292348c259b1866968f77ee76b66b37101c75"), 13757315529},
		{19665085, common.HexToHash("0x00f9315f89681b44bff46f1bad8894bc6dfae1c459d3d6520f9881861304a496"), 13824425382},
		{19709229, common.HexToHash("0x24e022b21ae1ba8a3e8c87cb9734aa1d1810fc4a69fe147d3ebb1ff0df8bcc15"), 13891534799},
		{19755387, common.HexToHash("0x77f184b7183b1a351760d242041249464b42cfaa6fbc4326f352b06bb3b21a02"), 13958642483},
		{19803894, common.HexToHash("0xf37eb1b054a6d61272940361f386eb744cded84d15c3250a7eabadede257371c"), 14025751618},
		{19847885, common.HexToHash("0x4659649fa8a3b4bbe8978673ba9a22ae20352c7052b676d373b5a51b1967ffa4"), 14092848654},
		{19894193, common.HexToHash("0x15606bdc0f1a710bd69443c7154d4979aece9329977b65990c9b39d6df84ed5c"), 14159970181},
		{19938551, common.HexToHash("0x6a8f4571924ed902bd8e71bf8ed9cc9d72cabeabc410277c8f0fc2b477d00eb7"), 14227077892},
		{19985354, common.HexToHash("0x7b6fb6376410b4d9e5d7ee02f78b2054e005dd2976eea47fc714f66b967dc285"), 14294187965},
		{20028440, common.HexToHash("0x9b37440b71c24756b8855b8012432b84276ae94c80aa1ccc8b70a7705992103c"), 14361296503},
		{20071780, common.HexToHash("0xa2ed129f343f3d60419772ec5635edcd36b8680c9419b6626e2bc84b230c709b"), 14428405230},
		{20113832, common.HexToHash("0xe7a610e8bcbf8ded141ebc7142de03dfc54b1bcc79e3bf8d07fad4e42b665bba"), 14495512019},
		{20156854, common.HexToHash("0xbe09704f65a70ef8843d9c8e511ddc989ea139dbe94cdfe37f52b03620d62385"), 14562622430},
		{20200135, common.HexToHash("0x9a58c34d5f77342e94065d119905c000223cd988c4b11f1539fff20737159630"), 14629731923},
		{20244389, common.HexToHash("0x1e733f0db9ef21183107259b3c2408c78fa5a01469928cd295f3ea7e8eedda45"), 14696840011},
		{20288489, common.HexToHash("0xb5ad7edd86b181226c8c7be0a08069e3955234e797426843fff9de0f57ec59cc"), 14763949714},
		{20333582, common.HexToHash("0x8040c209f5cd1738ee0f85c2f1db7c43a420d148680c7390fd1701b9f0bb671a"), 14831058335},
		{20377087, common.HexToHash("0x08fdc4cd246b6ae9d4a45646b0aed6af3bb330eb6cd4c8b93646157e7b002b84"), 14898167722},
		{20421699, common.HexToHash("0x5a2912b5fc2f02df33b655155990f92dcaacda5b75427fe3d87fb38f36b1c17d"), 14965275691},
		{20467194, common.HexToHash("0x3deaf4325c461004b090b0261996c645ab529c1471feaf7dc2bbe1f128180297"), 15032385211},
		{20512397, common.HexToHash("0x37e39697ec1b7683a6202be250ffaee7a1102e8030f87550b94af05ec66cec83"), 15099493973},
		{20557443, common.HexToHash("0x8e9c04468f3111eab8b1f6a58b277862c624861c237cadecc53ec249bd811bda"), 15166602882},
		{20595899, common.HexToHash("0x9787555fe57e4650002257eb2c88f1ef435b99d406e33fe2f889be180123ef25"), 15233709908},
		{20638606, common.HexToHash("0x70681cffd159ce2e580dbbbe8fa6b5343dbcb081429cdda6c577e615bef4ef05"), 15300820678},
		{20683605, common.HexToHash("0xb32662d5e241132ffe2249caea67f5746a6f4382297b2ac87c81e2794faf1f7a"), 15367929350},
		{20728630, common.HexToHash("0x15a817c846928b673032d5eacd0cff7a04217d268457aa30a322ecca32be4d49"), 15435037830},
		{20771519, common.HexToHash("0x542bc7b9804bbc45f4be470f4dc56f215a4dec71fed71eba2ffc804afd262b95"), 15502145990},
		{20815097, common.HexToHash("0x798cdd51c964fcf18561d70095d9613b84ba836817972799c9dfd0bfbe1e042b"), 15569256033},
		{20857859, common.HexToHash("0xfb5bb066d419a651d8e0186569eb4e8d8bcd5181d8f02e0d578b5dfe2fc738dd"), 15636364671},
		{20896890, common.HexToHash("0x834b8d6fad779e4cf8214128f6c93d7387b6d6279e517f6f0a284b5d831cc3ae"), 15703472902},
		{20939387, common.HexToHash("0x7adee7c78420c711efa216c61e0b561e581d7ff0331efd91ee16a609b34cfdc2"), 15770582325},
		{20981303, common.HexToHash("0x6f5d7b0cc6dad5eb258176e07de21795a8347d68f7303f06934046e0236bea6d"), 15837691713},
		{21023216, common.HexToHash("0x96cfe35a45df1297a36f42c59ebe706ab0473dfbf59ce910b5c5a8dbf696de1c"), 15904799667},
		{21068378, common.HexToHash("0x93753875ff330d922b23f823203198f3b1bb8833367c6b6a8f896ff54be2c12d"), 15971909040},
		{21112445, common.HexToHash("0x6ac02fa6ae486b86aba562eaf6f3d883befaa8ebedcfd8d74bdb7368d42deee3"), 16039003625},
		{21155992, common.HexToHash("0x25f76896b4b693bafb79e9a535e2bf00ed62a577e35209749346e8e79a60bb71"), 16106126344},
		{21200962, common.HexToHash("0x725f2befe913cb2659d262e2d3b6f79a706b31c557d52669471da22347ec8287"), 16173235265},
		{21244663, common.HexToHash("0x6778c4194f54e70939da38853daddb22bfaf160d35617ab05d0f5c476741147b"), 16240344735},
		{21290273, common.HexToHash("0x433ac819c40bd3061205fe0ece0645eec73f54a0a5c1559c981f983345bc0154"), 16307453543},
		{21336156, common.HexToHash("0x261dc8c1639d505624150d2388d15ed10bfb4c3ce9c0c327a4ec26531689a097"), 16374562466},
		{21378880, common.HexToHash("0x5c78b2b70553140dfdfdd4f415b98f88e74f74662315834038fd99042277d917"), 16441671104},
		{21421613, common.HexToHash("0x854532f9d1c77627b763f9cbc7099a653d59554ed57fa763bc218834c82955fe"), 16508780351},
		{21466875, common.HexToHash("0xb8b83cc62084e948235ef4b5973bf7fd988fa28bcaa72f7d38ad8e50de729618"), 16575888599},
		{21511942, common.HexToHash("0xe806a28bc1b7f8cd752c8ceedbe081d49773d4558a9fb95e3357c0c07172522d"), 16642996907},
		{21550291, common.HexToHash("0x1f3e26d303e7a2a9b0614f12f62b189da365b3947c5fe2d99ed2711b37fe7daa"), 16710106826},
		{21592690, common.HexToHash("0xa1408cfbc693faee4425e8fd9e83a181be535c33f874b56c3a7a114404c4f686"), 16777215566},
		{21636275, common.HexToHash("0x704734c2d0351f8ccd38721a9a4b80c063368afaaa857518d98498180a502bba"), 16844323959},
	},
}

@@ -0,0 +1,679 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/log"
)

// FilterMaps is the in-memory representation of the log index structure that is
// responsible for building and updating the index according to the canonical
// chain.
// Note that FilterMaps implements the same data structure as proposed in EIP-7745
// without the tree hashing and consensus changes:
// https://eips.ethereum.org/EIPS/eip-7745
type FilterMaps struct {
	closeCh               chan struct{}
	closeWg               sync.WaitGroup
	history, unindexLimit uint64
	noHistory             bool
	exportFileName        string
	Params

	db ethdb.KeyValueStore

	// fields written by the indexer and read by the matcher backend. The
	// indexer can read them without a lock and writes them under the indexLock
	// write lock. The matcher backend can read them under the indexLock read
	// lock.
	indexLock sync.RWMutex
	filterMapsRange
	indexedView chainView // always consistent with the log index

	// also accessed by the indexer and the matcher backend; no locking needed.
	filterMapCache *lru.Cache[uint32, filterMap]
	lastBlockCache *lru.Cache[uint32, lastBlockOfMap]
	lvPointerCache *lru.Cache[uint64, uint64]
	baseRowsCache  *lru.Cache[uint64, [][]uint32]

	// the matchers set and the fields of FilterMapsMatcherBackend instances are
	// read and written both by exported functions and the indexer.
	// Note that if both indexLock and matchersLock need to be locked then
	// indexLock should be locked first.
	matchersLock sync.Mutex
	matchers     map[*FilterMapsMatcherBackend]struct{}

	// fields only accessed by the indexer (no mutex required).
	renderSnapshots                                              *lru.Cache[uint64, *renderedMap]
	startedHeadIndex, startedTailIndex, startedTailUnindex       bool
	startedHeadIndexAt, startedTailIndexAt, startedTailUnindexAt time.Time
	loggedHeadIndex, loggedTailIndex                             bool
	lastLogHeadIndex, lastLogTailIndex                           time.Time
	ptrHeadIndex, ptrTailIndex, ptrTailUnindexBlock              uint64
	ptrTailUnindexMap                                            uint32

	targetView         chainView
	matcherSyncRequest *FilterMapsMatcherBackend
	stop               bool
	TargetViewCh       chan chainView
	BlockProcessingCh  chan bool
	blockProcessing    bool
	matcherSyncCh      chan *FilterMapsMatcherBackend
	waitIdleCh         chan chan bool
	tailRenderer       *mapRenderer

	// test hooks
	testDisableSnapshots, testSnapshotUsed bool
}

// filterMap is a full or partial in-memory representation of a filter map where
// rows are allowed to have a nil value meaning the row is not stored in the
// structure. Note that a known empty row should therefore be represented with
// a zero-length slice.
// It can be used as a memory cache or an overlay while preparing a batch of
// changes to the structure. In either case a nil value should be interpreted
// as transparent (uncached/unchanged).
type filterMap []FilterRow

// copy returns a copy of the given filter map. Note that the row slices are
// copied but their contents are not. This permits extending the rows further
// (which happens during map rendering) without affecting the validity of
// copies made for snapshots during rendering.
func (fm filterMap) copy() filterMap {
	c := make(filterMap, len(fm))
	copy(c, fm)
	return c
}
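
// Illustrative sketch of the shallow-copy semantics described above (fm is a
// hypothetical filterMap with at least six rows):
//
//	c := fm.copy()
//	c[5] = append(c[5], 12345)
//	// fm[5] is unchanged: even if the two rows share a backing array,
//	// the append only reassigns c[5]; fm[5] keeps its original length.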

// FilterRow encodes a single row of a filter map as a list of column indices.
// Note that the values are always stored in the same order as they were added,
// and if the same column index is added twice, it is also stored twice.
// The order of column indices and potential duplications do not matter when
// searching for a value, but keeping the original order makes reverting to a
// previous state simpler.
type FilterRow []uint32

// Equal returns true if the given filter rows are equivalent.
func (a FilterRow) Equal(b FilterRow) bool {
	if len(a) != len(b) {
		return false
	}
	for i, v := range a {
		if b[i] != v {
			return false
		}
	}
	return true
}

// filterMapsRange describes the rendered range of filter maps and the range
// of fully rendered blocks.
type filterMapsRange struct {
	initialized        bool
	targetBlockNumber  uint64
	targetBlockId      common.Hash
	headBlockDelimiter uint64 // zero if afterLastIndexedBlock != targetBlockNumber
	// if initialized then all maps are rendered between firstRenderedMap and
	// afterLastRenderedMap-1
	firstRenderedMap, afterLastRenderedMap uint32
	// if tailPartialEpoch > 0 then maps between firstRenderedMap-mapsPerEpoch and
	// firstRenderedMap-mapsPerEpoch+tailPartialEpoch-1 are rendered
	tailPartialEpoch uint32
	// if initialized then all log values belonging to blocks between
	// firstIndexedBlock and afterLastIndexedBlock-1 are fully rendered;
	// blockLvPointers are available between firstIndexedBlock and afterLastIndexedBlock-1
	firstIndexedBlock, afterLastIndexedBlock uint64
}
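
// Illustrative reading of the half-open ranges above: with
// firstRenderedMap == 5 and afterLastRenderedMap == 8, exactly maps 5, 6 and
// 7 are fully rendered, and the range is empty when the two fields are equal.
// The block range fields follow the same convention.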

// hasIndexedBlocks returns true if the range has at least one fully indexed block.
func (fmr *filterMapsRange) hasIndexedBlocks() bool {
	return fmr.initialized && fmr.afterLastIndexedBlock > fmr.firstIndexedBlock
}

// lastBlockOfMap is used for caching the (number, id) pairs belonging to the
// last block of each map.
type lastBlockOfMap struct {
	number uint64
	id     common.Hash
}

// NewFilterMaps creates a new FilterMaps and starts the indexer.
func NewFilterMaps(db ethdb.KeyValueStore, initView chainView, params Params, history, unindexLimit uint64, noHistory bool, exportFileName string) *FilterMaps {
	rs, initialized, err := rawdb.ReadFilterMapsRange(db)
	if err != nil {
		log.Error("Error reading log index range", "error", err)
	}
	params.deriveFields()
	f := &FilterMaps{
		db:                db,
		closeCh:           make(chan struct{}),
		waitIdleCh:        make(chan chan bool),
		TargetViewCh:      make(chan chainView),
		BlockProcessingCh: make(chan bool),
		history:           history,
		noHistory:         noHistory,
		unindexLimit:      unindexLimit,
		exportFileName:    exportFileName,
		Params:            params,
		filterMapsRange: filterMapsRange{
			initialized:           initialized,
			targetBlockId:         rs.TargetBlockId,
			targetBlockNumber:     rs.TargetBlockNumber,
			headBlockDelimiter:    rs.HeadBlockDelimiter,
			firstIndexedBlock:     rs.FirstIndexedBlock,
			afterLastIndexedBlock: rs.AfterLastIndexedBlock,
			firstRenderedMap:      rs.FirstRenderedMap,
			afterLastRenderedMap:  rs.AfterLastRenderedMap,
			tailPartialEpoch:      rs.TailPartialEpoch,
		},
		matcherSyncCh:   make(chan *FilterMapsMatcherBackend),
		matchers:        make(map[*FilterMapsMatcherBackend]struct{}),
		filterMapCache:  lru.NewCache[uint32, filterMap](3), //TODO named consts
		lastBlockCache:  lru.NewCache[uint32, lastBlockOfMap](1000),
		lvPointerCache:  lru.NewCache[uint64, uint64](1000),
		baseRowsCache:   lru.NewCache[uint64, [][]uint32](100),
		renderSnapshots: lru.NewCache[uint64, *renderedMap](cachedRevertPoints),
	}
	f.targetView = initView
	if f.initialized {
		f.indexedView = f.initChainView(f.targetView)
		f.targetBlockNumber = f.indexedView.headNumber()
		f.targetBlockId = f.indexedView.getBlockId(f.targetBlockNumber)
	}
	if f.hasIndexedBlocks() {
		log.Info("Log index range", "first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1, "first map", f.firstRenderedMap, "last map", f.afterLastRenderedMap-1)
	}
	f.exportCheckpoints()
	return f
}

// Start starts the indexer.
func (f *FilterMaps) Start() {
	if !f.testDisableSnapshots && f.initialized &&
		f.afterLastIndexedBlock == f.targetBlockNumber+1 &&
		f.firstRenderedMap < f.afterLastRenderedMap {
		// previous target head rendered; load last map as snapshot
		if err := f.loadHeadSnapshot(); err != nil {
			log.Error("Could not load head filter map snapshot", "error", err)
		}
	}
	f.closeWg.Add(1)
	go f.indexerLoop()
}

// Stop ensures that the indexer is fully stopped before returning.
func (f *FilterMaps) Stop() {
	close(f.closeCh)
	f.closeWg.Wait()
}

// initChainView returns a chain view consistent with both the current target
// view and the current state of the log index as found in the database, based
// on the last block of stored maps.
// Note that the returned view might be shorter than the existing index if
// the latest maps are not consistent with targetView.
func (f *FilterMaps) initChainView(chainView chainView) chainView {
	mapIndex := f.afterLastRenderedMap
	for {
		var ok bool
		mapIndex, ok = f.lastMapBoundaryBefore(mapIndex)
		if !ok {
			break
		}
		lastBlockNumber, lastBlockId, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			log.Error("Could not initialize indexed chain view", "error", err)
			break
		}
		if lastBlockNumber <= chainView.headNumber() && chainView.getBlockId(lastBlockNumber) == lastBlockId {
			return newLimitedChainView(chainView, lastBlockNumber)
		}
	}
	return newLimitedChainView(chainView, 0)
}

// reset un-initializes the FilterMaps structure and removes all related data from
// the database. The function returns true if everything was successfully removed.
func (f *FilterMaps) reset() bool {
	f.indexLock.Lock()
	f.filterMapsRange = filterMapsRange{}
	f.indexedView = nil
	f.filterMapCache.Purge()
	f.renderSnapshots.Purge()
	f.lastBlockCache.Purge()
	f.lvPointerCache.Purge()
	f.baseRowsCache.Purge()
	f.indexLock.Unlock()
	// deleting the range first ensures that resetDb will be called again at next
	// startup and any leftover data will be removed even if it cannot finish now.
	rawdb.DeleteFilterMapsRange(f.db)
	return f.removeDbWithPrefix(rawdb.FilterMapsPrefix, "Resetting log index database")
}

// init initializes an empty log index according to the current targetView.
func (f *FilterMaps) init() error {
	var bestIdx, bestLen int
	for idx, checkpointList := range checkpoints {
		// binary search for the last matching epoch head
		min, max := 0, len(checkpointList)
		for min < max {
			mid := (min + max + 1) / 2
			cp := checkpointList[mid-1]
			if cp.blockNumber <= f.targetView.headNumber() && f.targetView.getBlockId(cp.blockNumber) == cp.blockId {
				min = mid
			} else {
				max = mid - 1
			}
		}
		if max > bestLen {
			bestIdx, bestLen = idx, max
		}
	}
	batch := f.db.NewBatch()
	for epoch := 0; epoch < bestLen; epoch++ {
		cp := checkpoints[bestIdx][epoch]
		f.storeLastBlockOfMap(batch, (uint32(epoch+1)<<f.logMapsPerEpoch)-1, cp.blockNumber, cp.blockId)
		f.storeBlockLvPointer(batch, cp.blockNumber, cp.firstLvIndex)
	}
	fmr := filterMapsRange{
		initialized:       true,
		targetBlockId:     f.targetView.getBlockId(f.targetView.headNumber()),
		targetBlockNumber: f.targetView.headNumber(),
	}
	if bestLen > 0 {
		cp := checkpoints[bestIdx][bestLen-1]
		fmr.firstIndexedBlock = cp.blockNumber + 1
		fmr.afterLastIndexedBlock = cp.blockNumber + 1
		fmr.firstRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
		fmr.afterLastRenderedMap = uint32(bestLen) << f.logMapsPerEpoch
	}
	f.indexedView = f.targetView
	f.setRange(batch, fmr, true)
	return batch.Write()
}
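
// Illustrative walk-through of the checkpoint search above, with hypothetical
// numbers: for a checkpointList of length 8 whose first three entries match
// the target view, the loop maintains the invariant that entries 1..min all
// match while entries beyond max do not, and converges to min == max == 3.
// The first three epochs are then prefilled and indexing resumes from block
// checkpoints[bestIdx][2].blockNumber+1.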

// removeDbWithPrefix removes data with the given prefix from the database and
// returns true if everything was successfully removed.
func (f *FilterMaps) removeDbWithPrefix(prefix []byte, action string) bool {
	it := f.db.NewIterator(prefix, nil)
	hasData := it.Next()
	it.Release()
	if !hasData {
		return true
	}

	end := bytes.Clone(prefix)
	end[len(end)-1]++
	start := time.Now()
	var retry bool
	for {
		err := f.db.DeleteRange(prefix, end)
		if err == nil {
			log.Info(action+" finished", "elapsed", time.Since(start))
			return true
		}
		if err != leveldb.ErrTooManyKeys {
			log.Error(action+" failed", "error", err)
			return false
		}
		select {
		case <-f.closeCh:
			return false
		default:
		}
		if !retry {
			log.Info(action + " in progress...")
			retry = true
		}
	}
}

// setRange updates the covered range and also adds the changes to the given batch.
// Note that this function assumes that the index write lock is being held.
func (f *FilterMaps) setRange(batch ethdb.KeyValueWriter, newRange filterMapsRange, updateMatchers bool) {
	if f.indexedView != nil && f.indexedView.getBlockId(newRange.targetBlockNumber) != newRange.targetBlockId {
		panic("indexed range inconsistent with canonical chain")
	}
	f.filterMapsRange = newRange
	if updateMatchers {
		f.updateMatchersValidRange()
	}
	if newRange.initialized {
		rs := rawdb.FilterMapsRange{
			TargetBlockId:         newRange.targetBlockId,
			TargetBlockNumber:     newRange.targetBlockNumber,
			HeadBlockDelimiter:    newRange.headBlockDelimiter,
			FirstIndexedBlock:     newRange.firstIndexedBlock,
			AfterLastIndexedBlock: newRange.afterLastIndexedBlock,
			FirstRenderedMap:      newRange.firstRenderedMap,
			AfterLastRenderedMap:  newRange.afterLastRenderedMap,
			TailPartialEpoch:      newRange.tailPartialEpoch,
		}
		rawdb.WriteFilterMapsRange(batch, rs)
	} else {
		rawdb.DeleteFilterMapsRange(batch)
	}
}
|
||||
// getLogByLvIndex returns the log at the given log value index. If the index does
// not point to the first log value entry of a log then no log and no error are
// returned as this can happen when the log value index was a false positive.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result or an error may be returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getLogByLvIndex(lvIndex uint64) (*types.Log, error) {
	mapIndex := uint32(lvIndex >> f.logValuesPerMap)
	if mapIndex < f.firstRenderedMap || mapIndex >= f.afterLastRenderedMap {
		return nil, nil
	}
	// find possible block range based on map to block pointers
	lastBlockNumber, _, err := f.getLastBlockOfMap(mapIndex)
	if err != nil {
		return nil, err
	}
	var firstBlockNumber uint64
	if mapIndex > 0 {
		firstBlockNumber, _, err = f.getLastBlockOfMap(mapIndex - 1)
		if err != nil {
			return nil, err
		}
	}
	if firstBlockNumber < f.firstIndexedBlock {
		firstBlockNumber = f.firstIndexedBlock
	}
	// find block with binary search based on block to log value index pointers
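	// Loop invariant (derivable from the pointer checks above): the starting
	// log value pointer of firstBlockNumber is <= lvIndex while lvIndex falls
	// before the range of any block after lastBlockNumber, so the search
	// converges on the single block whose log value range contains lvIndex.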
	for firstBlockNumber < lastBlockNumber {
		midBlockNumber := (firstBlockNumber + lastBlockNumber + 1) / 2
		midLvPointer, err := f.getBlockLvPointer(midBlockNumber)
		if err != nil {
			return nil, err
		}
		if lvIndex < midLvPointer {
			lastBlockNumber = midBlockNumber - 1
		} else {
			firstBlockNumber = midBlockNumber
		}
	}
	// get block receipts
	receipts := f.indexedView.getReceipts(firstBlockNumber)
	if receipts == nil {
		return nil, errors.New("receipts not found")
	}
	lvPointer, err := f.getBlockLvPointer(firstBlockNumber)
	if err != nil {
		return nil, err
	}
	// iterate through receipts to find the exact log starting at lvIndex
	for _, receipt := range receipts {
		for _, log := range receipt.Logs {
			if lvPointer > lvIndex {
				// lvIndex does not point to the first log value (address value)
				// generated by a log as true matches should always do, so it
				// is considered a false positive (no log and no error returned).
				return nil, nil
			}
			if lvPointer == lvIndex {
				return log, nil // potential match
			}
			lvPointer += uint64(len(log.Topics) + 1)
		}
	}
	return nil, nil
}

// getFilterMap fetches an entire filter map from the database.
func (f *FilterMaps) getFilterMap(mapIndex uint32) (filterMap, error) {
	if fm, ok := f.filterMapCache.Get(mapIndex); ok {
		return fm, nil
	}
	fm := make(filterMap, f.mapHeight)
	for rowIndex := range fm {
		var err error
		fm[rowIndex], err = f.getFilterMapRow(mapIndex, uint32(rowIndex), false)
		if err != nil {
			return nil, err
		}
	}
	f.filterMapCache.Add(mapIndex, fm)
	return fm, nil
}

// getFilterMapRow fetches the given filter map row. If baseLayerOnly is true
// then only the first baseRowLength entries are returned.
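// Base rows are stored in groups of baseRowGroupLength (assumed to be a power
// of two): mapIndex & -f.baseRowGroupLength rounds down to the first map of
// the group while mapIndex & (f.baseRowGroupLength-1) selects the position
// within it.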
func (f *FilterMaps) getFilterMapRow(mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) {
	baseMapRowIndex := f.mapRowIndex(mapIndex&-f.baseRowGroupLength, rowIndex)
	baseRows, ok := f.baseRowsCache.Get(baseMapRowIndex)
	if !ok {
		var err error
		baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength)
		if err != nil {
			return nil, err
		}
		f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	}
	baseRow := baseRows[mapIndex&(f.baseRowGroupLength-1)]
	if baseLayerOnly {
		return baseRow, nil
	}
	extRow, err := rawdb.ReadFilterMapExtRow(f.db, f.mapRowIndex(mapIndex, rowIndex))
	if err != nil {
		return nil, err
	}
	return FilterRow(append(baseRow, extRow...)), nil
}

// storeFilterMapRows stores a set of filter map rows at the corresponding map
// indices and a shared row index.
func (f *FilterMaps) storeFilterMapRows(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	for len(mapIndices) > 0 {
		baseMapIndex := mapIndices[0] & -f.baseRowGroupLength
		groupLength := 1
		for groupLength < len(mapIndices) && mapIndices[groupLength]&-f.baseRowGroupLength == baseMapIndex {
			groupLength++
		}
		if err := f.storeFilterMapRowsOfGroup(batch, mapIndices[:groupLength], rowIndex, rows[:groupLength]); err != nil {
			return err
		}
		mapIndices, rows = mapIndices[groupLength:], rows[groupLength:]
	}
	return nil
}

// storeFilterMapRowsOfGroup stores a set of filter map rows at map indices
// belonging to the same base row group.
func (f *FilterMaps) storeFilterMapRowsOfGroup(batch ethdb.Batch, mapIndices []uint32, rowIndex uint32, rows []FilterRow) error {
	baseMapIndex := mapIndices[0] & -f.baseRowGroupLength
	baseMapRowIndex := f.mapRowIndex(baseMapIndex, rowIndex)
	baseRows, ok := f.baseRowsCache.Get(baseMapRowIndex)
	if !ok {
		var err error
		baseRows, err = rawdb.ReadFilterMapBaseRows(f.db, baseMapRowIndex, f.baseRowGroupLength)
		if err != nil {
			return err
		}
	}
	for i, mapIndex := range mapIndices {
		if mapIndex&-f.baseRowGroupLength != baseMapIndex {
			panic("mapIndices are not in the same base row group")
		}
		baseRow := []uint32(rows[i])
		var extRow FilterRow
		if uint32(len(rows[i])) > f.baseRowLength {
			extRow = baseRow[f.baseRowLength:]
			baseRow = baseRow[:f.baseRowLength]
		}
		baseRows[mapIndex&(f.baseRowGroupLength-1)] = baseRow
		rawdb.WriteFilterMapExtRow(batch, f.mapRowIndex(mapIndex, rowIndex), extRow)
	}
	f.baseRowsCache.Add(baseMapRowIndex, baseRows)
	rawdb.WriteFilterMapBaseRows(batch, baseMapRowIndex, baseRows)
	return nil
}

// mapRowIndex calculates the unified storage index where the given row of the
// given map is stored. Note that this indexing scheme is the same as the one
// proposed in EIP-7745 for tree-hashing the filter map structure and for the
// same data proximity reasons it is also suitable for database representation.
// See also:
// https://eips.ethereum.org/EIPS/eip-7745#hash-tree-structure
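//
// For illustration (the parameter values here are hypothetical): with
// logMapsPerEpoch = 4 and logMapHeight = 12, mapIndex = 0x123 and rowIndex = 5
// yield epochIndex = 0x12 and mapSubIndex = 0x3, so the returned index is
// ((0x12<<12)+5)<<4 + 0x3; all versions of a given row within an epoch are
// therefore stored next to each other.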
func (f *FilterMaps) mapRowIndex(mapIndex, rowIndex uint32) uint64 {
	epochIndex, mapSubIndex := mapIndex>>f.logMapsPerEpoch, mapIndex&(f.mapsPerEpoch-1)
	return (uint64(epochIndex)<<f.logMapHeight+uint64(rowIndex))<<f.logMapsPerEpoch + uint64(mapSubIndex)
}

// getBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// Note that this function assumes that the indexer read lock is being held when
// called from outside the indexerLoop goroutine.
func (f *FilterMaps) getBlockLvPointer(blockNumber uint64) (uint64, error) {
	if blockNumber > f.targetBlockNumber && f.targetBlockNumber+1 == f.afterLastIndexedBlock {
		return f.headBlockDelimiter, nil
	}
	if lvPointer, ok := f.lvPointerCache.Get(blockNumber); ok {
		return lvPointer, nil
	}
	lvPointer, err := rawdb.ReadBlockLvPointer(f.db, blockNumber)
	if err != nil {
		return 0, err
	}
	f.lvPointerCache.Add(blockNumber, lvPointer)
	return lvPointer, nil
}

// storeBlockLvPointer stores the starting log value index where the log values
// generated by the given block are located.
func (f *FilterMaps) storeBlockLvPointer(batch ethdb.Batch, blockNumber, lvPointer uint64) {
	f.lvPointerCache.Add(blockNumber, lvPointer)
	rawdb.WriteBlockLvPointer(batch, blockNumber, lvPointer)
}

// deleteBlockLvPointer deletes the starting log value index where the log values
// generated by the given block are located.
func (f *FilterMaps) deleteBlockLvPointer(batch ethdb.Batch, blockNumber uint64) {
	f.lvPointerCache.Remove(blockNumber)
	rawdb.DeleteBlockLvPointer(batch, blockNumber)
}

// getLastBlockOfMap returns the number and id of the block that generated the
// last log value entry of the given map.
func (f *FilterMaps) getLastBlockOfMap(mapIndex uint32) (uint64, common.Hash, error) {
	if lastBlock, ok := f.lastBlockCache.Get(mapIndex); ok {
		return lastBlock.number, lastBlock.id, nil
	}
	number, id, err := rawdb.ReadFilterMapLastBlock(f.db, mapIndex)
	if err != nil {
		return 0, common.Hash{}, err
	}
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	return number, id, nil
}

// storeLastBlockOfMap stores the number and id of the block that generated the
// last log value entry of the given map.
func (f *FilterMaps) storeLastBlockOfMap(batch ethdb.Batch, mapIndex uint32, number uint64, id common.Hash) {
	f.lastBlockCache.Add(mapIndex, lastBlockOfMap{number: number, id: id})
	rawdb.WriteFilterMapLastBlock(batch, mapIndex, number, id)
}

// deleteLastBlockOfMap deletes the number of the block that generated the last
// log value entry of the given map.
func (f *FilterMaps) deleteLastBlockOfMap(batch ethdb.Batch, mapIndex uint32) {
	f.lastBlockCache.Remove(mapIndex)
	rawdb.DeleteFilterMapLastBlock(batch, mapIndex)
}

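// deleteTailEpoch removes the filter maps, last-block-of-map entries and block
// log value pointers of the given tail epoch and updates the indexed range
// accordingly. The entries right at the epoch boundary are retained (see the
// "keep last entry" notes below) as they still delimit the remaining range.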
func (f *FilterMaps) deleteTailEpoch(epoch uint32) error {
	firstMap := epoch << f.logMapsPerEpoch
	lastBlock, _, err := f.getLastBlockOfMap(firstMap + f.mapsPerEpoch - 1)
	if err != nil {
		return err
	}
	var firstBlock uint64
	if epoch > 0 {
		firstBlock, _, err = f.getLastBlockOfMap(firstMap - 1)
		if err != nil {
			return err
		}
		firstBlock++
	}
	fmr := f.filterMapsRange
	if f.firstRenderedMap == firstMap && f.afterLastRenderedMap > firstMap+f.mapsPerEpoch && f.tailPartialEpoch == 0 {
		fmr.firstRenderedMap = firstMap + f.mapsPerEpoch
		fmr.firstIndexedBlock = lastBlock + 1
	} else if f.firstRenderedMap == firstMap+f.mapsPerEpoch {
		fmr.tailPartialEpoch = 0
	} else {
		return errors.New("invalid tail epoch number")
	}
	f.setRange(f.db, fmr, true)
	rawdb.DeleteFilterMapRows(f.db, f.mapRowIndex(firstMap, 0), f.mapRowIndex(firstMap+f.mapsPerEpoch, 0))
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch; mapIndex++ {
		f.filterMapCache.Remove(mapIndex)
	}
	rawdb.DeleteFilterMapLastBlocks(f.db, firstMap, firstMap+f.mapsPerEpoch-1) // keep last entry
	for mapIndex := firstMap; mapIndex < firstMap+f.mapsPerEpoch-1; mapIndex++ {
		f.lastBlockCache.Remove(mapIndex)
	}
	rawdb.DeleteBlockLvPointers(f.db, firstBlock, lastBlock) // keep last entry
	for blockNumber := firstBlock; blockNumber < lastBlock; blockNumber++ {
		f.lvPointerCache.Remove(blockNumber)
	}
	return nil
}

// exportCheckpoints exports epoch checkpoints in the format used by checkpoints.go.
func (f *FilterMaps) exportCheckpoints() {
	if f.exportFileName == "" {
		return
	}
	w, err := os.Create(f.exportFileName)
	if err != nil {
		log.Error("Error creating checkpoint export file", "name", f.exportFileName, "error", err)
		return
	}
	defer w.Close()

	epochCount := f.afterLastRenderedMap >> f.logMapsPerEpoch
	log.Info("Exporting log index checkpoints", "epochs", epochCount, "file", f.exportFileName)
	w.WriteString("\t{\n")
	for epoch := uint32(0); epoch < epochCount; epoch++ {
		lastBlock, lastBlockId, err := f.getLastBlockOfMap((epoch+1)<<f.logMapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of epoch", "epoch", epoch, "error", err)
			return
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			log.Error("Error fetching log value pointer of last block", "block", lastBlock, "error", err)
			return
		}
		w.WriteString(fmt.Sprintf("\t\t{%d, common.HexToHash(\"0x%064x\"), %d},\n", lastBlock, lastBlockId, lvPtr))
	}
	w.WriteString("\t},\n")
}
@ -0,0 +1,349 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"math"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

const (
	cachedRevertPoints = 8                // revert points for most recent blocks in memory
	logFrequency       = time.Second * 20 // log info frequency during long indexing/unindexing process
	headLogDelay       = time.Second      // head indexing log info delay (do not log if finished faster)
)

// indexerLoop initializes and updates the log index structure according to the
// current targetView.
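// The loop cycles through three activities: initializing the index structure,
// rendering head maps while the target head is not fully indexed, and
// extending or pruning the tail afterwards; whenever no further progress can
// be made it blocks in waitForEvent.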
func (f *FilterMaps) indexerLoop() {
	defer f.closeWg.Done()

	if f.noHistory {
		f.reset()
		return
	}
	log.Info("Started log indexer")

	for !f.stop {
		if !f.initialized {
			if err := f.init(); err != nil {
				log.Error("Error initializing log index", "error", err)
				f.waitForEvent()
				continue
			}
		}
		if !f.targetHeadIndexed() {
			if !f.tryIndexHead() {
				f.waitForEvent()
			}
		} else {
			if f.tryIndexTail() && f.tryUnindexTail() {
				f.waitForEvent()
			}
		}
	}
}

// WaitIdle blocks until the indexer is in an idle state while synced up to the
// latest targetView.
func (f *FilterMaps) WaitIdle() {
	if f.noHistory {
		f.closeWg.Wait()
		return
	}
	for {
		ch := make(chan bool)
		f.waitIdleCh <- ch
		if <-ch {
			return
		}
	}
}

// waitForEvent blocks until an event happens that the indexer might react to.
func (f *FilterMaps) waitForEvent() {
	for !f.stop && (f.blockProcessing || f.targetHeadIndexed()) {
		f.processSingleEvent(true)
	}
}

// processEvents processes all events, blocking only while block processing is
// in progress and indexing should be suspended.
func (f *FilterMaps) processEvents() {
	for !f.stop && f.processSingleEvent(f.blockProcessing) {
	}
}

// processSingleEvent processes a single event either in a blocking or
// non-blocking manner.
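// Note that the two select statements below are identical except that the
// non-blocking variant adds a default case; a select statement cannot have a
// conditional default case, hence the duplication.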
func (f *FilterMaps) processSingleEvent(blocking bool) bool {
	if f.matcherSyncRequest != nil {
		f.matcherSyncRequest.synced(f.targetBlockNumber)
		f.matcherSyncRequest = nil
	}
	if blocking {
		select {
		case targetView := <-f.TargetViewCh:
			f.setTargetView(targetView)
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.BlockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		case ch := <-f.waitIdleCh:
			ch <- !f.blockProcessing && f.targetHeadIndexed()
		}
	} else {
		select {
		case targetView := <-f.TargetViewCh:
			f.setTargetView(targetView)
		case f.matcherSyncRequest = <-f.matcherSyncCh:
		case f.blockProcessing = <-f.BlockProcessingCh:
		case <-f.closeCh:
			f.stop = true
		default:
			return false
		}
	}
	return true
}

// setTargetView updates the target chain view of the indexer.
func (f *FilterMaps) setTargetView(targetView chainView) {
	if equalViews(f.targetView, targetView) {
		return
	}
	f.targetView = targetView
}

// tryIndexHead tries to render head maps according to the current targetView
// and returns true if successful.
func (f *FilterMaps) tryIndexHead() bool {
	if f.targetView == nil {
		return false
	}
	headRenderer, err := f.renderMapsBefore(math.MaxUint32)
	if err != nil {
		log.Error("Error creating log index head renderer", "error", err)
		return false
	}
	if headRenderer == nil {
		return true
	}
	if !f.startedHeadIndex {
		f.lastLogHeadIndex = time.Now()
		f.startedHeadIndexAt = f.lastLogHeadIndex
		f.startedHeadIndex = true
		f.ptrHeadIndex = f.afterLastIndexedBlock
	}
	if _, err := headRenderer.run(func() bool {
		f.processEvents()
		return f.stop
	}, func() {
		f.tryUnindexTail()
		if f.hasIndexedBlocks() && f.afterLastIndexedBlock >= f.ptrHeadIndex &&
			((!f.loggedHeadIndex && time.Since(f.startedHeadIndexAt) > headLogDelay) ||
				time.Since(f.lastLogHeadIndex) > logFrequency) {
			log.Info("Log index head rendering in progress",
				"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
				"processed", f.afterLastIndexedBlock-f.ptrHeadIndex,
				"remaining", f.targetBlockNumber+1-f.afterLastIndexedBlock,
				"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
			f.loggedHeadIndex = true
			f.lastLogHeadIndex = time.Now()
		}
	}); err != nil {
		log.Error("Log index head rendering failed", "error", err)
		return false
	}
	if f.loggedHeadIndex {
		log.Info("Log index head rendering finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"processed", f.afterLastIndexedBlock-f.ptrHeadIndex,
			"elapsed", common.PrettyDuration(time.Since(f.startedHeadIndexAt)))
	}
	f.loggedHeadIndex, f.startedHeadIndex = false, false
	return true
}

// tryIndexTail tries to render tail epochs until the tail target block is
// indexed and returns true if successful.
// Note that tail indexing is only started if the log index head is fully
// rendered according to targetView and is suspended as soon as the targetView
// is changed.
func (f *FilterMaps) tryIndexTail() bool {
	for firstEpoch := f.firstRenderedMap >> f.logMapsPerEpoch; firstEpoch > 0 && f.needTailEpoch(firstEpoch-1); {
		f.processEvents()
		if f.stop || !f.targetHeadIndexed() {
			return false
		}
		// resume process if tail rendering was interrupted because of head rendering
		tailRenderer := f.tailRenderer
		f.tailRenderer = nil
		if tailRenderer != nil && tailRenderer.afterLastMap != f.firstRenderedMap {
			tailRenderer = nil
		}
		if tailRenderer == nil {
			var err error
			tailRenderer, err = f.renderMapsBefore(f.firstRenderedMap)
			if err != nil {
				log.Error("Error creating log index tail renderer", "error", err)
				return false
			}
		}
		if tailRenderer == nil {
			return true
		}
		if !f.startedTailIndex {
			f.lastLogTailIndex = time.Now()
			f.startedTailIndexAt = f.lastLogTailIndex
			f.startedTailIndex = true
			f.ptrTailIndex = f.firstIndexedBlock - f.tailPartialBlocks()
		}
		done, err := tailRenderer.run(func() bool {
			f.processEvents()
			return f.stop || !f.targetHeadIndexed()
		}, func() {
			tpb, ttb := f.tailPartialBlocks(), f.tailTargetBlock()
			remaining := uint64(1)
			if f.firstIndexedBlock > ttb+tpb {
				remaining = f.firstIndexedBlock - ttb - tpb
			}
			if f.hasIndexedBlocks() && f.ptrTailIndex >= f.firstIndexedBlock &&
				(!f.loggedTailIndex || time.Since(f.lastLogTailIndex) > logFrequency) {
				log.Info("Log index tail rendering in progress",
					"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
					"processed", f.ptrTailIndex-f.firstIndexedBlock+tpb,
					"remaining", remaining,
					"next tail epoch percentage", f.tailPartialEpoch*100/f.mapsPerEpoch,
					"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
				f.loggedTailIndex = true
				f.lastLogTailIndex = time.Now()
			}
		})
		if err != nil {
			log.Error("Log index tail rendering failed", "error", err)
		}
		if !done {
			f.tailRenderer = tailRenderer // only keep tail renderer if interrupted by stopCb
			return false
		}
	}
	if f.loggedTailIndex {
		log.Info("Log index tail rendering finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"processed", f.ptrTailIndex-f.firstIndexedBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailIndexAt)))
		f.loggedTailIndex = false
	}
	return true
}

// tryUnindexTail removes entire epochs of log index data as long as the first
// fully indexed block is at least as old as the tail target.
// Note that unindexing is very quick as it only removes continuous ranges of
// data from the database and is also called while running head indexing.
func (f *FilterMaps) tryUnindexTail() bool {
	for {
		firstEpoch := (f.firstRenderedMap - f.tailPartialEpoch) >> f.logMapsPerEpoch
		if f.needTailEpoch(firstEpoch) {
			break
		}
		f.processEvents()
		if f.stop {
			return false
		}
		if !f.startedTailUnindex {
			f.startedTailUnindexAt = time.Now()
			f.startedTailUnindex = true
			f.ptrTailUnindexMap = f.firstRenderedMap - f.tailPartialEpoch
			f.ptrTailUnindexBlock = f.firstIndexedBlock
		}
		if err := f.deleteTailEpoch(firstEpoch); err != nil {
			log.Error("Log index tail epoch unindexing failed", "error", err)
			return false
		}
	}
	if f.startedTailUnindex {
		log.Info("Log index tail unindexing finished",
			"first block", f.firstIndexedBlock, "last block", f.afterLastIndexedBlock-1,
			"removed maps", f.ptrTailUnindexMap-f.firstRenderedMap,
			"removed blocks", f.ptrTailUnindexBlock-f.firstIndexedBlock,
			"elapsed", common.PrettyDuration(time.Since(f.startedTailUnindexAt)))
		f.startedTailUnindex = false
	}
	return true
}

// needTailEpoch returns true if the given tail epoch needs to be kept
// according to the current tail target, false if it can be removed.
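// Since a map holds 1<<logValuesPerMap values and an epoch holds
// 1<<logMapsPerEpoch maps, epoch e covers the log value index range
// [e<<(logValuesPerMap+logMapsPerEpoch), (e+1)<<(logValuesPerMap+logMapsPerEpoch));
// the final comparison below keeps the epoch as long as its end is not below
// the log value index of the tail target block.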
func (f *FilterMaps) needTailEpoch(epoch uint32) bool {
	firstEpoch := f.firstRenderedMap >> f.logMapsPerEpoch
	if epoch > firstEpoch {
		return true
	}
	if epoch+1 < firstEpoch {
		return false
	}
	tailTarget := f.tailTargetBlock()
	if tailTarget < f.firstIndexedBlock {
		return true
	}
	tailLvIndex, err := f.getBlockLvPointer(tailTarget)
	if err != nil {
		log.Error("Could not get log value index of tail block", "error", err)
		return true
	}
	return uint64(epoch+1)<<(f.logValuesPerMap+f.logMapsPerEpoch) >= tailLvIndex
}

// tailTargetBlock returns the target value for the tail block number according
// to the log history parameter and the current index head.
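// For example, with history = 1000 and targetBlockNumber = 1500 the tail
// target is block 1500 + 1 - 1000 = 501, keeping the 1000 most recent blocks
// indexed.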
func (f *FilterMaps) tailTargetBlock() uint64 {
	if f.history == 0 || f.targetBlockNumber < f.history {
		return 0
	}
	return f.targetBlockNumber + 1 - f.history
}

// tailPartialBlocks returns the number of rendered blocks in the partially
// rendered next tail epoch.
func (f *FilterMaps) tailPartialBlocks() uint64 {
	if f.tailPartialEpoch == 0 {
		return 0
	}
	end, _, err := f.getLastBlockOfMap(f.firstRenderedMap - f.mapsPerEpoch + f.tailPartialEpoch - 1)
	if err != nil {
		log.Error("Error fetching last block of map", "mapIndex", f.firstRenderedMap-f.mapsPerEpoch+f.tailPartialEpoch-1, "error", err)
	}
	var start uint64
	if f.firstRenderedMap-f.mapsPerEpoch > 0 {
		start, _, err = f.getLastBlockOfMap(f.firstRenderedMap - f.mapsPerEpoch - 1)
		if err != nil {
			log.Error("Error fetching last block of map", "mapIndex", f.firstRenderedMap-f.mapsPerEpoch-1, "error", err)
		}
	}
	return end - start
}

// targetHeadIndexed returns true if the current log index is consistent with
// targetView with its head block fully rendered.
func (f *FilterMaps) targetHeadIndexed() bool {
	return equalViews(f.targetView, f.indexedView) && f.afterLastIndexedBlock == f.targetBlockNumber+1
}
@ -0,0 +1,437 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	crand "crypto/rand"
	"crypto/sha256"
	"math/big"
	"math/rand"
	"sync"
	"testing"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/params"
)

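// testParams is a reduced parameter set that makes tests cross map and epoch
// boundaries quickly. Assuming deriveFields fills in the usual power-of-two
// derived values, each map holds 1<<4 = 16 log values and each epoch holds
// 1<<4 = 16 maps.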
var testParams = Params{
	logMapHeight:       2,
	logMapsPerEpoch:    4,
	logValuesPerMap:    4,
	baseRowGroupLength: 4,
	baseRowLengthRatio: 2,
	logLayerDiff:       2,
}

func TestIndexerRandomRange(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()

	forks := make([][]common.Hash, 10)
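	// Each block carries 5 transactions with 2 logs of 4 topics each; a log
	// contributes one address value plus one value per topic, so a block
	// generates 5*2*(1+4) = 50 log values plus 1 block delimiter, 51 in total.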
	ts.chain.addBlocks(1000, 5, 2, 4, false) // 51 log values per block
	for i := range forks {
		if i != 0 {
			forkBlock := rand.Intn(1000)
			ts.chain.setHead(forkBlock)
			ts.chain.addBlocks(1000-forkBlock, 5, 2, 4, false) // 51 log values per block
		}
		forks[i] = ts.chain.getCanonicalChain()
	}
	lvPerBlock := uint64(51)
	ts.setHistory(0, false)
	var (
		history       int
		noHistory     bool
		fork, head    = len(forks) - 1, 1000
		checkSnapshot bool
	)
	ts.fm.WaitIdle()
	for i := 0; i < 200; i++ {
		switch rand.Intn(3) {
		case 0:
			// change history settings
			switch rand.Intn(10) {
			case 0:
				history, noHistory = 0, false
			case 1:
				history, noHistory = 0, true
			default:
				history, noHistory = rand.Intn(1000)+1, false
			}
			ts.testDisableSnapshots = rand.Intn(2) == 0
			ts.setHistory(uint64(history), noHistory)
		case 1:
			// change head to random position of random fork
			fork, head = rand.Intn(len(forks)), rand.Intn(1001)
			ts.chain.setCanonicalChain(forks[fork][:head+1])
		case 2:
			if head < 1000 {
				checkSnapshot = !noHistory && head != 0 // no snapshot generated for block 0
				// add blocks after the current head
				head += rand.Intn(1000-head) + 1
				ts.fm.testSnapshotUsed = false
				ts.chain.setCanonicalChain(forks[fork][:head+1])
			}
		}
		ts.fm.WaitIdle()
		if checkSnapshot {
			if ts.fm.testSnapshotUsed == ts.fm.testDisableSnapshots {
				ts.t.Fatalf("Invalid snapshot used state after head extension (used: %v, disabled: %v)", ts.fm.testSnapshotUsed, ts.fm.testDisableSnapshots)
			}
			checkSnapshot = false
		}
		if noHistory {
			if ts.fm.initialized {
				t.Fatalf("filterMapsRange initialized while indexing is disabled")
			}
			continue
		}
		if !ts.fm.initialized {
			t.Fatalf("filterMapsRange not initialized while indexing is enabled")
		}
		var tailBlock uint64
		if history > 0 && history <= head {
			tailBlock = uint64(head + 1 - history)
		}
		var tailEpoch uint32
		if tailBlock > 0 {
			tailLvPtr := uint64(tailBlock-1) * lvPerBlock // no logs in genesis block, only delimiter
			tailEpoch = uint32(tailLvPtr >> (testParams.logValuesPerMap + testParams.logMapsPerEpoch))
		}
		var expTailBlock uint64
		if tailEpoch > 0 {
			tailLvPtr := uint64(tailEpoch) << (testParams.logValuesPerMap + testParams.logMapsPerEpoch) // first available lv ptr
			// (expTailBlock-1)*lvPerBlock >= tailLvPtr
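			// solving the above for the smallest integer expTailBlock gives
			// expTailBlock = ceil(tailLvPtr/lvPerBlock) + 1, which under
			// integer division is the expression below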
			expTailBlock = (tailLvPtr + lvPerBlock*2 - 1) / lvPerBlock
		}
		if ts.fm.afterLastIndexedBlock != uint64(head+1) || ts.fm.targetBlockNumber != uint64(head) || ts.fm.targetBlockId != forks[fork][head] {
			ts.t.Fatalf("Invalid index head (expected #%d %v, got #%d %v)", head, forks[fork][head], ts.fm.afterLastIndexedBlock-1, ts.fm.targetBlockId)
		}
		if ts.fm.headBlockDelimiter != uint64(head)*lvPerBlock {
			ts.t.Fatalf("Invalid index head delimiter pointer (expected %d, got %d)", uint64(head)*lvPerBlock, ts.fm.headBlockDelimiter)
		}
		if ts.fm.firstIndexedBlock != expTailBlock {
			ts.t.Fatalf("Invalid index tail block (expected #%d, got #%d)", expTailBlock, ts.fm.firstIndexedBlock)
		}
	}
}

func TestIndexerCompareDb(t *testing.T) {
	ts := newTestSetup(t)
	defer ts.close()

	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	// revert points are stored after block 500
	ts.chain.addBlocks(500, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain1 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 1 [0, 1000]")

	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1/2 [0, 600]")

	ts.chain.addBlocks(600, 10, 3, 4, true)
	ts.fm.WaitIdle()
	chain2 := ts.chain.getCanonicalChain()
	ts.storeDbHash("chain 2 [0, 1200]")

	ts.chain.setHead(600)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1/2 [0, 600]")

	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 1 [201, 1000]")

	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [0, 1000]")

	ts.setHistory(800, false)
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.storeDbHash("chain 2 [401, 1200]")

	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.storeDbHash("no index")

	ts.chain.setCanonicalChain(chain2[:501])
	ts.setHistory(0, false)
	ts.fm.WaitIdle()
	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [0, 1200]")

	ts.chain.setCanonicalChain(chain1)
	ts.fm.WaitIdle()
	ts.setHistory(800, false)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 1 [201, 1000]")

	ts.chain.setCanonicalChain(chain2)
	ts.fm.WaitIdle()
	ts.checkDbHash("chain 2 [401, 1200]")

	ts.setHistory(0, true)
	ts.fm.WaitIdle()
	ts.checkDbHash("no index")
}

type testSetup struct {
	t                    *testing.T
	fm                   *FilterMaps
	db                   ethdb.Database
	chain                *testChain
	params               Params
	dbHashes             map[string]common.Hash
	testDisableSnapshots bool
}

func newTestSetup(t *testing.T) *testSetup {
	params := testParams
	params.deriveFields()
	ts := &testSetup{
		t:        t,
		db:       rawdb.NewMemoryDatabase(),
		params:   params,
		dbHashes: make(map[string]common.Hash),
	}
	ts.chain = ts.newTestChain()
	return ts
}

func (ts *testSetup) setHistory(history uint64, noHistory bool) {
	if ts.fm != nil {
		ts.fm.Stop()
	}
	head := ts.chain.CurrentBlock()
	ts.fm = NewFilterMaps(ts.db, NewStoredChainView(ts.chain, head.Number.Uint64(), head.Hash()), ts.params, history, 1, noHistory, "")
	ts.fm.testDisableSnapshots = ts.testDisableSnapshots
	ts.fm.Start()
}

func (ts *testSetup) storeDbHash(id string) {
	dbHash := ts.fmDbHash()
	for otherId, otherHash := range ts.dbHashes {
		if otherHash == dbHash {
			ts.t.Fatalf("Unexpected equal database hashes `%s` and `%s`", id, otherId)
		}
	}
	ts.dbHashes[id] = dbHash
}

func (ts *testSetup) checkDbHash(id string) {
	if ts.fmDbHash() != ts.dbHashes[id] {
		ts.t.Fatalf("Database `%s` hash mismatch", id)
	}
}

func (ts *testSetup) fmDbHash() common.Hash {
	hasher := sha256.New()
	it := ts.db.NewIterator(nil, nil)
	for it.Next() {
		hasher.Write(it.Key())
		hasher.Write(it.Value())
	}
	it.Release()
	var result common.Hash
	hasher.Sum(result[:0])
	return result
}

func (ts *testSetup) close() {
	if ts.fm != nil {
		ts.fm.Stop()
	}
	ts.db.Close()
	ts.chain.db.Close()
}

type testChain struct {
	ts        *testSetup
	db        ethdb.Database
	lock      sync.RWMutex
	canonical []common.Hash
	blocks    map[common.Hash]*types.Block
	receipts  map[common.Hash]types.Receipts
}

func (ts *testSetup) newTestChain() *testChain {
	return &testChain{
		ts:       ts,
		blocks:   make(map[common.Hash]*types.Block),
		receipts: make(map[common.Hash]types.Receipts),
	}
}

func (tc *testChain) CurrentBlock() *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if len(tc.canonical) == 0 {
		return nil
	}
	return tc.blocks[tc.canonical[len(tc.canonical)-1]].Header()
}

func (tc *testChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if block := tc.blocks[hash]; block != nil {
		return block.Header()
	}
	return nil
}

func (tc *testChain) GetCanonicalHash(number uint64) common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	if uint64(len(tc.canonical)) <= number {
		return common.Hash{}
	}
	return tc.canonical[number]
}

func (tc *testChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	return tc.receipts[hash]
}

func (tc *testChain) addBlocks(count, maxTxPerBlock, maxLogsPerReceipt, maxTopicsPerLog int, random bool) {
	tc.lock.Lock()
	blockGen := func(i int, gen *core.BlockGen) {
		var txCount int
		if random {
			txCount = rand.Intn(maxTxPerBlock + 1)
		} else {
			txCount = maxTxPerBlock
		}
		for k := txCount; k > 0; k-- {
			receipt := types.NewReceipt(nil, false, 0)
			var logCount int
			if random {
				logCount = rand.Intn(maxLogsPerReceipt + 1)
			} else {
				logCount = maxLogsPerReceipt
			}
			receipt.Logs = make([]*types.Log, logCount)
			for i := range receipt.Logs {
				log := &types.Log{}
				receipt.Logs[i] = log
				crand.Read(log.Address[:])
				var topicCount int
				if random {
					topicCount = rand.Intn(maxTopicsPerLog + 1)
				} else {
					topicCount = maxTopicsPerLog
				}
				log.Topics = make([]common.Hash, topicCount)
				for j := range log.Topics {
					crand.Read(log.Topics[j][:])
				}
			}
			gen.AddUncheckedReceipt(receipt)
			gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil))
		}
	}

	var (
		blocks   []*types.Block
		receipts []types.Receipts
		engine   = ethash.NewFaker()
	)

	if len(tc.canonical) == 0 {
		gspec := &core.Genesis{
			Alloc:   types.GenesisAlloc{},
			BaseFee: big.NewInt(params.InitialBaseFee),
			Config:  params.TestChainConfig,
		}
		tc.db, blocks, receipts = core.GenerateChainWithGenesis(gspec, engine, count, blockGen)
		gblock := gspec.ToBlock()
		ghash := gblock.Hash()
		tc.canonical = []common.Hash{ghash}
		tc.blocks[ghash] = gblock
		tc.receipts[ghash] = types.Receipts{}
	} else {
		blocks, receipts = core.GenerateChain(params.TestChainConfig, tc.blocks[tc.canonical[len(tc.canonical)-1]], engine, tc.db, count, blockGen)
	}

	for i, block := range blocks {
		num, hash := int(block.NumberU64()), block.Hash()
		if len(tc.canonical) != num {
			panic("canonical chain length mismatch")
		}
		tc.canonical = append(tc.canonical, hash)
		tc.blocks[hash] = block
		if receipts[i] != nil {
			tc.receipts[hash] = receipts[i]
		} else {
			tc.receipts[hash] = types.Receipts{}
		}
	}
	tc.lock.Unlock()
	tc.setTargetHead()
}

func (tc *testChain) setHead(headNum int) {
	tc.lock.Lock()
	tc.canonical = tc.canonical[:headNum+1]
	tc.lock.Unlock()
	tc.setTargetHead()
}

func (tc *testChain) setTargetHead() {
	head := tc.CurrentBlock()
	if tc.ts.fm != nil {
		if !tc.ts.fm.noHistory {
			tc.ts.fm.TargetViewCh <- NewStoredChainView(tc, head.Number.Uint64(), head.Hash())
		}
	}
}

func (tc *testChain) getCanonicalChain() []common.Hash {
	tc.lock.RLock()
	defer tc.lock.RUnlock()

	cc := make([]common.Hash, len(tc.canonical))
	copy(cc, tc.canonical)
	return cc
}

// setCanonicalChain restores an earlier state of the chain.
func (tc *testChain) setCanonicalChain(cc []common.Hash) {
	tc.lock.Lock()
	tc.canonical = make([]common.Hash, len(cc))
	copy(tc.canonical, cc)
	tc.lock.Unlock()
	tc.setTargetHead()
}
@ -0,0 +1,756 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"errors"
	"math"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/log"
)

const (
	valuesPerCallback = 1000 // log values processed per event process callback
	maxMapsPerBatch   = 64   // maximum number of maps rendered in memory
	rowsPerBatch      = 100  // number of rows written to db in a single batch
)

var (
	errChainUpdate = errors.New("rendered section of chain updated")
)

// mapRenderer represents a process that renders filter maps in a specified
// range according to the actual targetView.
type mapRenderer struct {
	f                                *FilterMaps
	afterLastMap                     uint32
	currentMap                       *renderedMap
	finishedMaps                     map[uint32]*renderedMap
	firstFinished, afterLastFinished uint32
	iterator                         *logIterator
}

// renderedMap represents a single filter map that is being rendered in memory.
type renderedMap struct {
	filterMap     filterMap
	mapIndex      uint32
	lastBlock     uint64
	lastBlockId   common.Hash
	blockLvPtrs   []uint64 // start pointers of blocks starting in this map; last one is lastBlock
	finished      bool     // iterator finished; all values rendered
	headDelimiter uint64   // if finished then points to the future block delimiter of the head block
}

// firstBlock returns the first block number that starts in the given map.
func (r *renderedMap) firstBlock() uint64 {
	return r.lastBlock + 1 - uint64(len(r.blockLvPtrs))
}

// renderMapsBefore creates a mapRenderer that renders the log index until the
// specified map index boundary, starting from the latest available starting
// point that is consistent with the current targetView.
// The renderer ensures that filterMapsRange, indexedView and the actual map
// data are always consistent with each other. If afterLastMap is greater than
// the latest existing rendered map then indexedView is updated to targetView,
// otherwise it is checked that the rendered range is consistent with both
// views.
func (f *FilterMaps) renderMapsBefore(afterLastMap uint32) (*mapRenderer, error) {
	nextMap, startBlock, startLvPtr, err := f.lastCanonicalMapBoundaryBefore(afterLastMap)
	if err != nil {
		return nil, err
	}
	if snapshot := f.lastCanonicalSnapshotBefore(afterLastMap); snapshot != nil && snapshot.mapIndex >= nextMap {
		return f.renderMapsFromSnapshot(snapshot)
	}
	if nextMap >= afterLastMap {
		return nil, nil
	}
	return f.renderMapsFromMapBoundary(nextMap, afterLastMap, startBlock, startLvPtr)
}

// renderMapsFromSnapshot creates a mapRenderer that starts rendering from a
// snapshot made at a block boundary.
func (f *FilterMaps) renderMapsFromSnapshot(cp *renderedMap) (*mapRenderer, error) {
	f.testSnapshotUsed = true
	iter, err := f.newLogIteratorFromBlockDelimiter(cp.lastBlock)
	if err != nil {
		return nil, err
	}
	return &mapRenderer{
		f: f,
		currentMap: &renderedMap{
			filterMap:   cp.filterMap.copy(),
			mapIndex:    cp.mapIndex,
			lastBlock:   cp.lastBlock,
			blockLvPtrs: cp.blockLvPtrs,
		},
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     cp.mapIndex,
		afterLastFinished: cp.mapIndex,
		afterLastMap:      math.MaxUint32,
		iterator:          iter,
	}, nil
}

// renderMapsFromMapBoundary creates a mapRenderer that starts rendering at a
// map boundary.
func (f *FilterMaps) renderMapsFromMapBoundary(firstMap, afterLastMap uint32, startBlock, startLvPtr uint64) (*mapRenderer, error) {
	iter, err := f.newLogIteratorFromMapBoundary(firstMap, startBlock, startLvPtr)
	if err != nil {
		return nil, err
	}
	return &mapRenderer{
		f: f,
		currentMap: &renderedMap{
			filterMap: f.emptyFilterMap(),
			mapIndex:  firstMap,
			lastBlock: iter.blockNumber,
		},
		finishedMaps:      make(map[uint32]*renderedMap),
		firstFinished:     firstMap,
		afterLastFinished: firstMap,
		afterLastMap:      afterLastMap,
		iterator:          iter,
	}, nil
}

// lastCanonicalSnapshotBefore returns the latest cached snapshot that matches
// the current targetView.
func (f *FilterMaps) lastCanonicalSnapshotBefore(afterLastMap uint32) *renderedMap {
	var best *renderedMap
	for _, blockNumber := range f.renderSnapshots.Keys() {
		if cp, _ := f.renderSnapshots.Get(blockNumber); cp != nil && blockNumber < f.afterLastIndexedBlock &&
			blockNumber <= f.targetView.headNumber() && f.targetView.getBlockId(blockNumber) == cp.lastBlockId &&
			cp.mapIndex < afterLastMap && (best == nil || blockNumber > best.lastBlock) {
			best = cp
		}
	}
	return best
}

// lastCanonicalMapBoundaryBefore returns the latest map boundary before the
// specified map index that matches the current targetView. This can either
// be a checkpoint (hardcoded or left from a previously unindexed tail epoch)
// or the boundary of a currently rendered map.
// Along with the next map index where the rendering can be started, the number
// and starting log value pointer of the last block is also returned.
func (f *FilterMaps) lastCanonicalMapBoundaryBefore(afterLastMap uint32) (nextMap uint32, startBlock, startLvPtr uint64, err error) {
	if !f.initialized {
		return 0, 0, 0, nil
	}
	mapIndex := afterLastMap
	for {
		var ok bool
		if mapIndex, ok = f.lastMapBoundaryBefore(mapIndex); !ok {
			return 0, 0, 0, nil
		}
		lastBlock, _, err := f.getLastBlockOfMap(mapIndex)
		if err != nil {
			return 0, 0, 0, err
		}
		if lastBlock >= f.indexedView.headNumber() || lastBlock >= f.targetView.headNumber() ||
			!matchViews(f.indexedView, f.targetView, lastBlock) {
			// map is not full or inconsistent with targetView; roll back
			continue
		}
		lvPtr, err := f.getBlockLvPointer(lastBlock)
		if err != nil {
			return 0, 0, 0, err
		}
		return mapIndex + 1, lastBlock, lvPtr, nil
	}
}

// lastMapBoundaryBefore returns the latest map boundary before the specified
// map index.
func (f *FilterMaps) lastMapBoundaryBefore(mapIndex uint32) (uint32, bool) {
	if !f.initialized || f.afterLastRenderedMap == 0 {
		return 0, false
	}
	if mapIndex > f.afterLastRenderedMap {
		mapIndex = f.afterLastRenderedMap
	}
	if mapIndex > f.firstRenderedMap {
		return mapIndex - 1, true
	}
	if mapIndex+f.mapsPerEpoch > f.firstRenderedMap {
		if mapIndex > f.firstRenderedMap-f.mapsPerEpoch+f.tailPartialEpoch {
			mapIndex = f.firstRenderedMap - f.mapsPerEpoch + f.tailPartialEpoch
		}
	} else {
		mapIndex = (mapIndex >> f.logMapsPerEpoch) << f.logMapsPerEpoch
	}
	if mapIndex == 0 {
		return 0, false
	}
	return mapIndex - 1, true
}

// emptyFilterMap returns an empty filter map.
func (f *FilterMaps) emptyFilterMap() filterMap {
	return make(filterMap, f.mapHeight)
}

// loadHeadSnapshot loads the last rendered map from the database and creates
// a snapshot.
func (f *FilterMaps) loadHeadSnapshot() error {
	fm, err := f.getFilterMap(f.afterLastRenderedMap - 1)
	if err != nil {
		return err
	}
	lastBlock, _, err := f.getLastBlockOfMap(f.afterLastRenderedMap - 1)
	if err != nil {
		return err
	}
	var firstBlock uint64
	if f.afterLastRenderedMap > 1 {
		prevLastBlock, _, err := f.getLastBlockOfMap(f.afterLastRenderedMap - 2)
		if err != nil {
			return err
		}
		firstBlock = prevLastBlock + 1
	}
	lvPtrs := make([]uint64, lastBlock+1-firstBlock)
	for i := range lvPtrs {
		lvPtrs[i], err = f.getBlockLvPointer(firstBlock + uint64(i))
		if err != nil {
			return err
		}
	}
	f.renderSnapshots.Add(f.targetBlockNumber, &renderedMap{
		filterMap:     fm,
		mapIndex:      f.afterLastRenderedMap - 1,
		lastBlock:     f.targetBlockNumber,
		lastBlockId:   f.targetBlockId,
		blockLvPtrs:   lvPtrs,
		finished:      true,
		headDelimiter: f.headBlockDelimiter,
	})
	return nil
}

// makeSnapshot creates a snapshot of the current state of the rendered map.
func (r *mapRenderer) makeSnapshot() {
	r.f.renderSnapshots.Add(r.iterator.blockNumber, &renderedMap{
		filterMap:     r.currentMap.filterMap.copy(),
		mapIndex:      r.currentMap.mapIndex,
		lastBlock:     r.iterator.blockNumber,
		lastBlockId:   r.f.targetView.getBlockId(r.currentMap.lastBlock),
		blockLvPtrs:   r.currentMap.blockLvPtrs,
		finished:      true,
		headDelimiter: r.iterator.lvIndex,
	})
}

// run does the actual map rendering. It periodically calls the stopCb callback
// and if it returns true the process is interrupted and can be resumed later
// by calling run again. The writeCb callback is called after new maps have
// been written to disk and the index range has been updated accordingly.
func (r *mapRenderer) run(stopCb func() bool, writeCb func()) (bool, error) {
	for {
		if done, err := r.renderCurrentMap(stopCb); !done {
			return done, err // stopped or failed
		}
		// map finished
		r.finishedMaps[r.currentMap.mapIndex] = r.currentMap
		r.afterLastFinished++
		if len(r.finishedMaps) >= maxMapsPerBatch || r.afterLastFinished&(r.f.baseRowGroupLength-1) == 0 {
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
		}
		if r.afterLastFinished == r.afterLastMap || r.iterator.finished {
			if err := r.writeFinishedMaps(stopCb); err != nil {
				return false, err
			}
			writeCb()
			return true, nil
		}
		r.currentMap = &renderedMap{
			filterMap: r.f.emptyFilterMap(),
			mapIndex:  r.afterLastFinished,
		}
	}
}

// renderCurrentMap renders a single map.
func (r *mapRenderer) renderCurrentMap(stopCb func() bool) (bool, error) {
	if !r.iterator.updateChainView(r.f.targetView) {
		return false, errChainUpdate
	}
	var waitCnt int

	if r.iterator.lvIndex == 0 {
		r.currentMap.blockLvPtrs = []uint64{0}
	}
	type lvPos struct{ rowIndex, layerIndex uint32 }
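	// rowIndexCache remembers the row and layer position last used for each
	// log value hash in the current map, so repeated occurrences of the same
	// value can resume from that position instead of deriving the row index
	// and walking the layers from scratch.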
	rowIndexCache := lru.NewCache[common.Hash, lvPos](10000)
	defer rowIndexCache.Purge()

	for r.iterator.lvIndex < uint64(r.currentMap.mapIndex+1)<<r.f.logValuesPerMap && !r.iterator.finished {
		waitCnt++
		if waitCnt >= valuesPerCallback {
			if stopCb() {
				return false, nil
			}
			if !r.iterator.updateChainView(r.f.targetView) {
				return false, errChainUpdate
			}
			waitCnt = 0
		}
		r.currentMap.lastBlock = r.iterator.blockNumber
		if r.iterator.delimiter {
			r.currentMap.lastBlock++
			r.currentMap.blockLvPtrs = append(r.currentMap.blockLvPtrs, r.iterator.lvIndex+1)
		}
		if logValue := r.iterator.getValueHash(); logValue != (common.Hash{}) {
			lvp, cached := rowIndexCache.Get(logValue)
			if !cached {
				lvp = lvPos{rowIndex: r.f.rowIndex(r.currentMap.mapIndex, 0, logValue)}
			}
			for uint32(len(r.currentMap.filterMap[lvp.rowIndex])) >= r.f.maxRowLength(lvp.layerIndex) {
				lvp.layerIndex++
				lvp.rowIndex = r.f.rowIndex(r.currentMap.mapIndex, lvp.layerIndex, logValue)
				cached = false
			}
			r.currentMap.filterMap[lvp.rowIndex] = append(r.currentMap.filterMap[lvp.rowIndex], r.f.columnIndex(r.iterator.lvIndex, logValue))
			if !cached {
				rowIndexCache.Add(logValue, lvp)
			}
		}
		if err := r.iterator.next(); err != nil {
			return false, err
		}
		if !r.f.testDisableSnapshots && r.afterLastMap >= r.f.afterLastRenderedMap &&
			(r.iterator.delimiter || r.iterator.finished) {
			r.makeSnapshot()
		}
	}
	if r.iterator.finished {
		r.currentMap.finished = true
		r.currentMap.headDelimiter = r.iterator.lvIndex
	}
	r.currentMap.lastBlockId = r.f.targetView.getBlockId(r.currentMap.lastBlock)
	return true, nil
}

// writeFinishedMaps writes rendered maps to the database and updates
// filterMapsRange and indexedView accordingly.
func (r *mapRenderer) writeFinishedMaps(pauseCb func() bool) error {
	if len(r.finishedMaps) == 0 {
		return nil
	}
	r.f.indexLock.Lock()
	defer r.f.indexLock.Unlock()

	oldRange := r.f.filterMapsRange
	tempRange, err := r.getTempRange()
	if err != nil {
		return err
	}
	newRange, err := r.getUpdatedRange()
	if err != nil {
		return err
	}
	renderedView := r.f.targetView // the pauseCb callback might still change targetView while writing finished maps

	batch := r.f.db.NewBatch()
	var writeCnt int
	checkWriteCnt := func() {
		writeCnt++
		if writeCnt == rowsPerBatch {
			writeCnt = 0
			if err := batch.Write(); err != nil {
				log.Crit("Error writing log index update batch", "error", err)
			}
			// do not exit while in partially written state but do allow processing
			// events and pausing while block processing is in progress
			pauseCb()
			batch = r.f.db.NewBatch()
		}
	}

	r.f.setRange(batch, tempRange, false)
	// add or update filter rows
	for rowIndex := uint32(0); rowIndex < r.f.mapHeight; rowIndex++ {
		var (
			mapIndices []uint32
			rows       []FilterRow
		)
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			row := r.finishedMaps[mapIndex].filterMap[rowIndex]
			if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && row.Equal(fm[rowIndex]) {
				continue
			}
			mapIndices = append(mapIndices, mapIndex)
			rows = append(rows, row)
		}
		if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
			for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
				if fm, _ := r.f.filterMapCache.Get(mapIndex); fm != nil && len(fm[rowIndex]) == 0 {
					continue
				}
				mapIndices = append(mapIndices, mapIndex)
				rows = append(rows, nil)
			}
		}
		if err := r.f.storeFilterMapRows(batch, mapIndices, rowIndex, rows); err != nil {
			return err
		}
		checkWriteCnt()
	}
	// update filter map cache
	if newRange.afterLastRenderedMap == r.afterLastFinished {
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Add(mapIndex, r.finishedMaps[mapIndex].filterMap)
		}
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	} else {
		for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
			r.f.filterMapCache.Remove(mapIndex)
		}
	}
	// add or update block pointers
	blockNumber := r.finishedMaps[r.firstFinished].firstBlock()
	for mapIndex := r.firstFinished; mapIndex < r.afterLastFinished; mapIndex++ {
		renderedMap := r.finishedMaps[mapIndex]
		r.f.storeLastBlockOfMap(batch, mapIndex, renderedMap.lastBlock, renderedMap.lastBlockId)
		checkWriteCnt()
		if blockNumber != renderedMap.firstBlock() {
			panic("non-continuous block numbers")
		}
		for _, lvPtr := range renderedMap.blockLvPtrs {
			r.f.storeBlockLvPointer(batch, blockNumber, lvPtr)
			checkWriteCnt()
			blockNumber++
		}
	}
	if newRange.afterLastRenderedMap == r.afterLastFinished { // head updated; remove future entries
		for mapIndex := r.afterLastFinished; mapIndex < oldRange.afterLastRenderedMap; mapIndex++ {
			r.f.deleteLastBlockOfMap(batch, mapIndex)
			checkWriteCnt()
		}
		for ; blockNumber < oldRange.afterLastIndexedBlock; blockNumber++ {
			r.f.deleteBlockLvPointer(batch, blockNumber)
			checkWriteCnt()
		}
	}
	r.finishedMaps = make(map[uint32]*renderedMap)
	r.firstFinished = r.afterLastFinished
	r.f.indexedView = renderedView
	r.f.setRange(batch, newRange, true)
	if err := batch.Write(); err != nil {
		log.Crit("Error writing log index update batch", "error", err)
	}
	return nil
}

// getTempRange returns a temporary filterMapsRange that is committed to the
|
||||
// database while the newly rendered maps are partially written. Writing all
|
||||
// processed maps in a single database batch would be a serious hit on db
|
||||
// performance so instead safety is ensured by first reverting the valid map
|
||||
// range to the unchanged region until all new map data is committed.
|
||||
func (r *mapRenderer) getTempRange() (filterMapsRange, error) { |
||||
tempRange := r.f.filterMapsRange |
||||
if err := tempRange.addRenderedRange(r.firstFinished, r.firstFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil { |
||||
return filterMapsRange{}, err |
||||
} |
||||
if tempRange.firstRenderedMap != r.f.firstRenderedMap { |
||||
// first rendered map changed; update first indexed block
|
||||
if tempRange.firstRenderedMap > 0 { |
||||
lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.firstRenderedMap - 1) |
||||
if err != nil { |
||||
return filterMapsRange{}, err |
||||
} |
||||
tempRange.firstIndexedBlock = lastBlock + 1 |
||||
} else { |
||||
tempRange.firstIndexedBlock = 0 |
||||
} |
||||
} |
||||
if tempRange.afterLastRenderedMap != r.f.afterLastRenderedMap { |
||||
// first rendered map changed; update first indexed block
|
||||
if tempRange.afterLastRenderedMap > 0 { |
||||
lastBlock, _, err := r.f.getLastBlockOfMap(tempRange.afterLastRenderedMap - 1) |
||||
if err != nil { |
||||
return filterMapsRange{}, err |
||||
} |
||||
tempRange.afterLastIndexedBlock = lastBlock + 1 |
||||
} else { |
||||
tempRange.afterLastIndexedBlock = 0 |
||||
} |
||||
tempRange.headBlockDelimiter = 0 |
||||
} |
||||
return tempRange, nil |
||||
} |
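// Illustrative sketch, not from the original source: together with
// writeFinishedMaps above, getTempRange implements a two-phase commit.
// In simplified pseudo-Go with placeholder helper names:
//
//	r.f.setRange(batch, tempRange, false) // phase 1: shrink valid range to the untouched region
//	writeAllMapData(batch)                // bulk data, possibly flushed in several sub-batches
//	r.f.setRange(batch, newRange, true)   // phase 2: advertise the new maps only once durable
//	batch.Write()
//
// A crash between the two phases leaves the index consistent, merely missing
// the maps that were being written.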
 |
// getUpdatedRange returns the updated filterMapsRange after writing the newly
// rendered maps.
func (r *mapRenderer) getUpdatedRange() (filterMapsRange, error) { |
	// update filterMapsRange
	newRange := r.f.filterMapsRange |
	if err := newRange.addRenderedRange(r.firstFinished, r.afterLastFinished, r.afterLastMap, r.f.mapsPerEpoch); err != nil { |
		return filterMapsRange{}, err |
	} |
	if newRange.firstRenderedMap != r.f.firstRenderedMap { |
		// first rendered map changed; update first indexed block
		if newRange.firstRenderedMap > 0 { |
			lastBlock, _, err := r.f.getLastBlockOfMap(newRange.firstRenderedMap - 1) |
			if err != nil { |
				return filterMapsRange{}, err |
			} |
			newRange.firstIndexedBlock = lastBlock + 1 |
		} else { |
			newRange.firstIndexedBlock = 0 |
		} |
	} |
	if newRange.afterLastRenderedMap == r.afterLastFinished { |
		// last rendered map changed; update last indexed block and head pointers
		newRange.targetBlockNumber = r.f.targetView.headNumber() |
		newRange.targetBlockId = r.f.targetView.getBlockId(newRange.targetBlockNumber) |
		lm := r.finishedMaps[r.afterLastFinished-1] |
		if lm.finished { |
			newRange.afterLastIndexedBlock = newRange.targetBlockNumber + 1 |
			if lm.lastBlock != newRange.targetBlockNumber { |
				panic("map rendering finished but last block != head block") |
			} |
			newRange.headBlockDelimiter = lm.headDelimiter |
		} else { |
			newRange.afterLastIndexedBlock = lm.lastBlock |
			newRange.headBlockDelimiter = 0 |
		} |
	} else { |
		// last rendered map not replaced; ensure that target chain view matches
		// indexed chain view on the rendered section
		if lastBlock := r.finishedMaps[r.afterLastFinished-1].lastBlock; !matchViews(r.f.indexedView, r.f.targetView, lastBlock) { |
			return filterMapsRange{}, errChainUpdate |
		} |
	} |
	return newRange, nil |
} |
 |
// addRenderedRange adds the range [firstRendered, afterLastRendered) and
// removes [afterLastRendered, afterLastRemoved) from the set of rendered maps.
func (fmr *filterMapsRange) addRenderedRange(firstRendered, afterLastRendered, afterLastRemoved, mapsPerEpoch uint32) error { |
	if !fmr.initialized { |
		return errors.New("log index not initialized") |
	} |
	type endpoint struct { |
		m uint32 |
		d int |
	} |
	endpoints := []endpoint{{fmr.firstRenderedMap, 1}, {fmr.afterLastRenderedMap, -1}, {firstRendered, 1}, {afterLastRendered, -101}, {afterLastRemoved, 100}} |
	if fmr.tailPartialEpoch > 0 { |
		endpoints = append(endpoints, []endpoint{{fmr.firstRenderedMap - mapsPerEpoch, 1}, {fmr.firstRenderedMap - mapsPerEpoch + fmr.tailPartialEpoch, -1}}...) |
	} |
	sort.Slice(endpoints, func(i, j int) bool { return endpoints[i].m < endpoints[j].m }) |
	var ( |
		sum    int |
		merged []uint32 |
		last   bool |
	) |
	for i, e := range endpoints { |
		sum += e.d |
		if i < len(endpoints)-1 && endpoints[i+1].m == e.m { |
			continue |
		} |
		if (sum > 0) != last { |
			merged = append(merged, e.m) |
			last = !last |
		} |
	} |
	if len(merged) == 0 { |
		fmr.tailPartialEpoch = 0 |
		fmr.firstRenderedMap = firstRendered |
		fmr.afterLastRenderedMap = firstRendered |
		return nil |
	} |
	if len(merged) == 2 { |
		fmr.tailPartialEpoch = 0 |
		fmr.firstRenderedMap = merged[0] |
		fmr.afterLastRenderedMap = merged[1] |
		return nil |
	} |
	if len(merged) == 4 { |
		if merged[2] != merged[0]+mapsPerEpoch { |
			return errors.New("invalid tail partial epoch") |
		} |
		fmr.tailPartialEpoch = merged[1] - merged[0] |
		fmr.firstRenderedMap = merged[2] |
		fmr.afterLastRenderedMap = merged[3] |
		return nil |
	} |
	return errors.New("invalid number of rendered sections") |
} |
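// Illustrative note, not from the original source: the endpoint weights
// implement a sweep over map indices where the running sum is positive
// exactly on the maps that stay rendered. The removal interval uses the
// large weights -101/+100 so it overrides any overlapping +1/-1 "keep"
// intervals. A worked example under these assumptions:
//
//	// existing range [10,20), newly rendered [20,25), removal up to 30:
//	// endpoints: {10,+1} {20,-1} {20,+1} {25,-101} {30,+100}
//	// running sum: 1 on [10,25), -100 on [25,30), 0 afterwards
//	// merged == {10, 25} -> one contiguous rendered section [10,25)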
 |
// logIterator iterates over the linear log value index range.
type logIterator struct { |
	chainView                       chainView |
	blockNumber                     uint64 |
	receipts                        types.Receipts |
	blockStart, delimiter, finished bool |
	txIndex, logIndex, topicIndex   int |
	lvIndex                         uint64 |
} |
 |
var errUnindexedRange = errors.New("unindexed range") |
 |
// newLogIteratorFromBlockDelimiter creates a logIterator starting at the
// given block's first log value entry (the block delimiter), according to the
// current targetView.
func (f *FilterMaps) newLogIteratorFromBlockDelimiter(blockNumber uint64) (*logIterator, error) { |
	if blockNumber > f.targetView.headNumber() { |
		return nil, errors.New("iterator entry point after target chain head") |
	} |
	if blockNumber < f.firstIndexedBlock || blockNumber >= f.afterLastIndexedBlock { |
		return nil, errUnindexedRange |
	} |
	if !matchViews(f.indexedView, f.targetView, blockNumber) { |
		return nil, errors.New("target and indexed views diverged at iterator entry point") |
	} |
	var lvIndex uint64 |
	if blockNumber == f.targetBlockNumber { |
		lvIndex = f.headBlockDelimiter |
	} else { |
		var err error |
		lvIndex, err = f.getBlockLvPointer(blockNumber + 1) |
		if err != nil { |
			return nil, err |
		} |
		lvIndex-- |
	} |
	finished := blockNumber == f.targetView.headNumber() |
	return &logIterator{ |
		chainView:   f.targetView, |
		blockNumber: blockNumber, |
		finished:    finished, |
		delimiter:   !finished, |
		lvIndex:     lvIndex, |
	}, nil |
} |
 |
// newLogIteratorFromMapBoundary creates a logIterator starting at the given
// map boundary, according to the current targetView.
func (f *FilterMaps) newLogIteratorFromMapBoundary(mapIndex uint32, startBlock, startLvPtr uint64) (*logIterator, error) { |
	if startBlock > f.targetView.headNumber() { |
		return nil, errors.New("iterator entry point after target chain head") |
	} |
	if !matchViews(f.indexedView, f.targetView, startBlock) { |
		return nil, errors.New("target and indexed views diverged at iterator entry point") |
	} |
	// get block receipts
	receipts := f.targetView.getReceipts(startBlock) |
	if receipts == nil { |
		return nil, errors.New("receipts not found") |
	} |
	// initialize iterator at block start
	l := &logIterator{ |
		chainView:   f.targetView, |
		blockNumber: startBlock, |
		receipts:    receipts, |
		blockStart:  true, |
		lvIndex:     startLvPtr, |
	} |
	l.nextValid() |
	targetIndex := uint64(mapIndex) << f.logValuesPerMap |
	if l.lvIndex > targetIndex { |
		panic("last map block's lvPointer > map boundary") |
	} |
	// iterate to map boundary
	for l.lvIndex < targetIndex { |
		if l.finished { |
			panic("iterator already finished") |
		} |
		if err := l.next(); err != nil { |
			return nil, err |
		} |
	} |
	return l, nil |
} |
 |
// updateChainView updates the iterator's chain view if it still matches the
// previous view at the current position. Returns true if successful.
func (l *logIterator) updateChainView(cv chainView) bool { |
	if !matchViews(cv, l.chainView, l.blockNumber) { |
		return false |
	} |
	l.chainView = cv |
	return true |
} |
 |
// getValueHash returns the log value hash at the current position.
func (l *logIterator) getValueHash() common.Hash { |
	if l.delimiter || l.finished { |
		return common.Hash{} |
	} |
	log := l.receipts[l.txIndex].Logs[l.logIndex] |
	if l.topicIndex == 0 { |
		return addressValue(log.Address) |
	} |
	return topicValue(log.Topics[l.topicIndex-1]) |
} |
 |
// next moves the iterator to the next log value index.
func (l *logIterator) next() error { |
	if l.finished { |
		return nil |
	} |
	if l.delimiter { |
		l.delimiter = false |
		l.blockNumber++ |
		l.receipts = l.chainView.getReceipts(l.blockNumber) |
		if l.receipts == nil { |
			return errors.New("receipts not found") |
		} |
		l.txIndex, l.logIndex, l.topicIndex, l.blockStart = 0, 0, 0, true |
	} else { |
		l.topicIndex++ |
		l.blockStart = false |
	} |
	l.lvIndex++ |
	l.nextValid() |
	return nil |
} |
 |
// nextValid updates the internal transaction, log and topic index pointers
// to the next existing log value of the given block if necessary.
// Note that nextValid does not advance the log value index pointer.
func (l *logIterator) nextValid() { |
	for ; l.txIndex < len(l.receipts); l.txIndex++ { |
		receipt := l.receipts[l.txIndex] |
		for ; l.logIndex < len(receipt.Logs); l.logIndex++ { |
			log := receipt.Logs[l.logIndex] |
			if l.topicIndex <= len(log.Topics) { |
				return |
			} |
			l.topicIndex = 0 |
		} |
		l.logIndex = 0 |
	} |
	if l.blockNumber == l.chainView.headNumber() { |
		l.finished = true |
	} else { |
		l.delimiter = true |
	} |
} |
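// Illustrative sketch, not from the original source: a typical consumer of
// logIterator advances it once per log value index and reads getValueHash at
// each non-delimiter position (hypothetical driver code):
//
//	for !it.finished {
//		if !it.delimiter {
//			hash := it.getValueHash() // address or topic value at it.lvIndex
//			_ = hash                  // e.g. mark the hash on a filter map row
//		}
//		if err := it.next(); err != nil {
//			return err
//		}
//	}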
@ -0,0 +1,918 @@ |
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 |
package filtermaps |
 |
import ( |
	"context" |
	"errors" |
	"math" |
	"sync" |
	"sync/atomic" |
	"time" |
 |
	"github.com/ethereum/go-ethereum/common" |
	"github.com/ethereum/go-ethereum/common/mclock" |
	"github.com/ethereum/go-ethereum/core/types" |
	"github.com/ethereum/go-ethereum/log" |
) |
 |
const useTimeStats = true //TODO set to false before merging |
 |
// ErrMatchAll is returned when the specified filter matches everything.
// Handling this case in filtermaps would require an extra special case and
// would actually be slower than reverting to the legacy filter.
var ErrMatchAll = errors.New("match all patterns not supported") |
 |
// MatcherBackend defines the functions required for searching in the log index
// data structure. It is currently implemented by FilterMapsMatcherBackend but
// once EIP-7745 is implemented and active, these functions can also be trustlessly
// served by a remote prover.
type MatcherBackend interface { |
	GetParams() *Params |
	GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error) |
	GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) |
	GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error) |
	SyncLogIndex(ctx context.Context) (SyncRange, error) |
	Close() |
} |
 |
// SyncRange is returned by MatcherBackend.SyncLogIndex. It contains the latest
// chain head, the indexed range that is currently consistent with the chain
// and the valid range that has not been changed and has been consistent with
// all states of the chain since the previous SyncLogIndex or the creation of
// the matcher backend.
type SyncRange struct { |
	HeadNumber uint64 |
	// block range where the index has not changed since the last matcher sync
	// and therefore the set of matches found in this region is guaranteed to
	// be valid and complete.
	Valid                 bool |
	FirstValid, LastValid uint64 |
	// block range indexed according to the given chain head.
	Indexed                   bool |
	FirstIndexed, LastIndexed uint64 |
} |
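// Illustrative sketch, not from the original source: a matcher typically
// interprets SyncRange by trusting matches inside [FirstValid, LastValid]
// and re-running the search (or falling back) outside of it:
//
//	sr, err := backend.SyncLogIndex(ctx)
//	if err != nil {
//		return err
//	}
//	if !sr.Valid || sr.FirstValid > firstBlock || sr.LastValid < lastBlock {
//		// part of the searched range changed under us; re-query that part
//	}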
 |
// GetPotentialMatches returns a list of logs that are potential matches for the
// given filter criteria. If parts of the log index in the searched range are
// missing or changed during the search process then the resulting logs belonging
// to that block range might be missing or incorrect.
// Also note that the returned list may contain false positives.
func GetPotentialMatches(ctx context.Context, backend MatcherBackend, firstBlock, lastBlock uint64, addresses []common.Address, topics [][]common.Hash) ([]*types.Log, error) { |
	params := backend.GetParams() |
	// find the log value index range to search
	firstIndex, err := backend.GetBlockLvPointer(ctx, firstBlock) |
	if err != nil { |
		return nil, err |
	} |
	lastIndex, err := backend.GetBlockLvPointer(ctx, lastBlock+1) |
	if err != nil { |
		return nil, err |
	} |
	if lastIndex > 0 { |
		lastIndex-- |
	} |
	firstMap, lastMap := uint32(firstIndex>>params.logValuesPerMap), uint32(lastIndex>>params.logValuesPerMap) |
	firstEpoch, lastEpoch := firstMap>>params.logMapsPerEpoch, lastMap>>params.logMapsPerEpoch |
 |
	// build matcher according to the given filter criteria
	matchers := make([]matcher, len(topics)+1) |
	// matchAddress signals a match when there is a match for any of the given
	// addresses.
	// If the list of addresses is empty then it creates a "wild card" matcher
	// that signals every index as a potential match.
	matchAddress := make(matchAny, len(addresses)) |
	for i, address := range addresses { |
		matchAddress[i] = &singleMatcher{backend: backend, value: addressValue(address)} |
	} |
	matchers[0] = matchAddress |
	for i, topicList := range topics { |
		// matchTopic signals a match when there is a match for any of the topics
		// specified for the given position (topicList).
		// If topicList is empty then it creates a "wild card" matcher that signals
		// every index as a potential match.
		matchTopic := make(matchAny, len(topicList)) |
		for j, topic := range topicList { |
			matchTopic[j] = &singleMatcher{backend: backend, value: topicValue(topic)} |
		} |
		matchers[i+1] = matchTopic |
	} |
	// matcher is the final sequence matcher that signals a match when all underlying
	// matchers signal a match for consecutive log value indices.
	matcher := newMatchSequence(params, matchers) |
 |
	// processEpoch returns the potentially matching logs from the given epoch.
	processEpoch := func(epochIndex uint32) ([]*types.Log, error) { |
		var logs []*types.Log |
		// create a list of map indices to process
		fm, lm := epochIndex<<params.logMapsPerEpoch, (epochIndex+1)<<params.logMapsPerEpoch-1 |
		if fm < firstMap { |
			fm = firstMap |
		} |
		if lm > lastMap { |
			lm = lastMap |
		} |
		mapIndices := make([]uint32, lm+1-fm) |
		for i := range mapIndices { |
			mapIndices[i] = fm + uint32(i) |
		} |
		// find potential matches
		matches, err := getAllMatches(ctx, matcher, mapIndices) |
		if err != nil { |
			return logs, err |
		} |
		// get the actual logs located at the matching log value indices
		for _, m := range matches { |
			if m == nil { |
				return nil, ErrMatchAll |
			} |
			mlogs, err := getLogsFromMatches(ctx, backend, firstIndex, lastIndex, m) |
			if err != nil { |
				return logs, err |
			} |
			logs = append(logs, mlogs...) |
		} |
		return logs, nil |
	} |
 |
	type task struct { |
		epochIndex uint32 |
		logs       []*types.Log |
		err        error |
		done       chan struct{} |
	} |
 |
	taskCh := make(chan *task) |
	var wg sync.WaitGroup |
	defer func() { |
		close(taskCh) |
		wg.Wait() |
	}() |
 |
	worker := func() { |
		for task := range taskCh { |
			if task == nil { |
				break |
			} |
			task.logs, task.err = processEpoch(task.epochIndex) |
			close(task.done) |
		} |
		wg.Done() |
	} |
 |
	start := time.Now() |
	for i := 0; i < 4; i++ { |
		wg.Add(1) |
		go worker() |
	} |
 |
	var logs []*types.Log |
	// startEpoch is the next task to send whenever a worker can accept it.
	// waitEpoch is the next task we are waiting for to finish in order to append
	// results in the correct order.
	startEpoch, waitEpoch := firstEpoch, firstEpoch |
	tasks := make(map[uint32]*task) |
	tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})} |
	for waitEpoch <= lastEpoch { |
		select { |
		case taskCh <- tasks[startEpoch]: |
			startEpoch++ |
			if startEpoch <= lastEpoch { |
				if tasks[startEpoch] == nil { |
					tasks[startEpoch] = &task{epochIndex: startEpoch, done: make(chan struct{})} |
				} |
			} |
		case <-tasks[waitEpoch].done: |
			logs = append(logs, tasks[waitEpoch].logs...) |
			if err := tasks[waitEpoch].err; err != nil { |
				return logs, err |
			} |
			delete(tasks, waitEpoch) |
			waitEpoch++ |
			if waitEpoch <= lastEpoch { |
				if tasks[waitEpoch] == nil { |
					tasks[waitEpoch] = &task{epochIndex: waitEpoch, done: make(chan struct{})} |
				} |
			} |
		} |
	} |
	if useTimeStats { |
		log.Info("Log search finished", "elapsed", time.Since(start)) |
		for i, ma := range matchers { |
			for j, m := range ma.(matchAny) { |
				log.Info("Single matcher stats", "matchSequence", i, "matchAny", j) |
				m.(*singleMatcher).stats.print() |
			} |
		} |
	} |
	return logs, nil |
} |
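// Illustrative usage sketch, not from the original source; contractAddr,
// topicA and topicB are hypothetical example values:
//
//	mb := f.NewMatcherBackend()
//	defer mb.Close()
//	logs, err := GetPotentialMatches(ctx, mb, firstBlock, lastBlock,
//		[]common.Address{contractAddr},    // match logs emitted by contractAddr
//		[][]common.Hash{{topicA, topicB}}) // with topicA or topicB in position 0
//	// the result may contain false positives, so each returned log still has
//	// to be checked against the original filter criteria.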
 |
// getLogsFromMatches returns the list of potentially matching logs located at
// the given list of matching log indices. Matches outside the firstIndex to
// lastIndex range are not returned.
func getLogsFromMatches(ctx context.Context, backend MatcherBackend, firstIndex, lastIndex uint64, matches potentialMatches) ([]*types.Log, error) { |
	var logs []*types.Log |
	for _, match := range matches { |
		if match < firstIndex || match > lastIndex { |
			continue |
		} |
		log, err := backend.GetLogByLvIndex(ctx, match) |
		if err != nil { |
			return logs, err |
		} |
		if log != nil { |
			logs = append(logs, log) |
		} |
	} |
	return logs, nil |
} |
 |
// matcher defines a general abstraction for any matcher configuration that
// can instantiate a matcherInstance.
type matcher interface { |
	newInstance(mapIndices []uint32) matcherInstance |
} |
 |
// matcherInstance defines a general abstraction for a matcher configuration
// working on a specific set of map indices and eventually returning a list of
// potentially matching log value indices.
// Note that processing happens per mapping layer, each call returning a set
// of results for the maps where the processing has been finished at the given
// layer. Map indices can also be dropped before a result is returned for them
// in case the result is no longer interesting. Dropping indices twice or after
// a result has been returned has no effect. Exactly one matcherResult is
// returned per requested map index unless dropped.
type matcherInstance interface { |
	getMatchesForLayer(ctx context.Context, layerIndex uint32) ([]matcherResult, error) |
	dropIndices(mapIndices []uint32) |
} |
 |
// matcherResult contains the list of potentially matching log value indices
// for a given map index.
type matcherResult struct { |
	mapIndex uint32 |
	matches  potentialMatches |
} |
 |
// getAllMatches creates an instance for a given matcher and set of map indices,
// iterates through mapping layers and collects all results, then returns all
// results in the same order as the map indices were specified.
func getAllMatches(ctx context.Context, matcher matcher, mapIndices []uint32) ([]potentialMatches, error) { |
	instance := matcher.newInstance(mapIndices) |
	resultsMap := make(map[uint32]potentialMatches) |
	for layerIndex := uint32(0); len(resultsMap) < len(mapIndices); layerIndex++ { |
		results, err := instance.getMatchesForLayer(ctx, layerIndex) |
		if err != nil { |
			return nil, err |
		} |
		for _, result := range results { |
			resultsMap[result.mapIndex] = result.matches |
		} |
	} |
	matches := make([]potentialMatches, len(mapIndices)) |
	for i, mapIndex := range mapIndices { |
		matches[i] = resultsMap[mapIndex] |
	} |
	return matches, nil |
} |
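// Illustrative note, not from the original source: the layer loop above
// relies on the matcherInstance contract that every requested map index
// eventually yields exactly one matcherResult. A minimal conforming instance
// could look like this (hypothetical):
//
//	type emptyMatcherInstance struct{ mapIndices []uint32 }
//
//	func (c *emptyMatcherInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) ([]matcherResult, error) {
//		results := make([]matcherResult, len(c.mapIndices))
//		for i, idx := range c.mapIndices {
//			results[i] = matcherResult{mapIndex: idx, matches: potentialMatches{}} // non-nil empty set
//		}
//		c.mapIndices = nil // all results returned at layer 0
//		return results, nil
//	}
//
//	func (c *emptyMatcherInstance) dropIndices(mapIndices []uint32) {}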
 |
// singleMatcher implements matcher by returning matches for a single log value hash.
type singleMatcher struct { |
	backend MatcherBackend |
	value   common.Hash |
	stats   timeStats |
} |
 |
// singleMatcherInstance is an instance of singleMatcher.
type singleMatcherInstance struct { |
	*singleMatcher |
	mapIndices []uint32 |
	filterRows map[uint32][]FilterRow |
} |
 |
// newInstance creates a new instance of singleMatcher.
func (m *singleMatcher) newInstance(mapIndices []uint32) matcherInstance { |
	filterRows := make(map[uint32][]FilterRow) |
	for _, idx := range mapIndices { |
		filterRows[idx] = []FilterRow{} |
	} |
	copiedIndices := make([]uint32, len(mapIndices)) |
	copy(copiedIndices, mapIndices) |
	return &singleMatcherInstance{ |
		singleMatcher: m, |
		mapIndices:    copiedIndices, |
		filterRows:    filterRows, |
	} |
} |
 |
// getMatchesForLayer implements matcherInstance.
func (m *singleMatcherInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (results []matcherResult, err error) { |
	var st int |
	m.stats.set(&st, stOther) |
	params := m.backend.GetParams() |
	maskedMapIndex, rowIndex := uint32(math.MaxUint32), uint32(0) |
	for _, mapIndex := range m.mapIndices { |
		filterRows, ok := m.filterRows[mapIndex] |
		if !ok { |
			continue |
		} |
		if mm := params.maskedMapIndex(mapIndex, layerIndex); mm != maskedMapIndex { |
			// only recalculate rowIndex when necessary
			m.stats.set(&st, stRowCalc) |
			maskedMapIndex = mm |
			rowIndex = params.rowIndex(mapIndex, layerIndex, m.value) |
		} |
		if layerIndex == 0 { |
			m.stats.set(&st, stFetchFirst) |
		} else { |
			m.stats.set(&st, stFetchMore) |
		} |
		filterRow, err := m.backend.GetFilterMapRow(ctx, mapIndex, rowIndex, layerIndex == 0) |
		m.stats.set(&st, stOther) |
		if err != nil { |
			m.stats.set(&st, stNone) |
			return nil, err |
		} |
		filterRows = append(filterRows, filterRow) |
		if uint32(len(filterRow)) < params.maxRowLength(layerIndex) { |
			m.stats.set(&st, stProcess) |
			results = append(results, matcherResult{ |
				mapIndex: mapIndex, |
				matches:  params.potentialMatches(filterRows, mapIndex, m.value), |
			}) |
			m.stats.set(&st, stOther) |
			delete(m.filterRows, mapIndex) |
		} else { |
			m.filterRows[mapIndex] = filterRows |
		} |
	} |
	m.cleanMapIndices() |
	m.stats.set(&st, stNone) |
	return results, nil |
} |
 |
// dropIndices implements matcherInstance.
func (m *singleMatcherInstance) dropIndices(dropIndices []uint32) { |
	for _, mapIndex := range dropIndices { |
		delete(m.filterRows, mapIndex) |
	} |
	m.cleanMapIndices() |
} |
 |
// cleanMapIndices removes map indices from the list if there is no matching
// filterRows entry because a result has been returned or the index has been
// dropped.
func (m *singleMatcherInstance) cleanMapIndices() { |
	var j int |
	for i, mapIndex := range m.mapIndices { |
		if _, ok := m.filterRows[mapIndex]; ok { |
			if i != j { |
				m.mapIndices[j] = mapIndex |
			} |
			j++ |
		} |
	} |
	m.mapIndices = m.mapIndices[:j] |
} |
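// Illustrative note, not from the original source: cleanMapIndices uses the
// standard in-place filter idiom, compacting the slice without allocating.
// The same pattern in isolation:
//
//	kept := 0
//	for _, v := range values {
//		if keep(v) {
//			values[kept] = v
//			kept++
//		}
//	}
//	values = values[:kept]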
 |
// matchAny combines a set of matchers and returns a match for every position
// where any of the underlying matchers signaled a match. A zero-length matchAny
// acts as a "wild card" that signals a potential match at every position.
type matchAny []matcher |
 |
// matchAnyInstance is an instance of matchAny.
type matchAnyInstance struct { |
	matchAny |
	childInstances []matcherInstance |
	childResults   map[uint32]matchAnyResults |
} |
 |
// matchAnyResults is used by matchAnyInstance to collect results from all
// child matchers for a specific map index. Once all results have been received
// a merged result is returned for the given map and this structure is discarded.
type matchAnyResults struct { |
	matches  []potentialMatches |
	done     []bool |
	needMore int |
} |
 |
// newInstance creates a new instance of matchAny.
func (m matchAny) newInstance(mapIndices []uint32) matcherInstance { |
	if len(m) == 1 { |
		return m[0].newInstance(mapIndices) |
	} |
	childResults := make(map[uint32]matchAnyResults) |
	for _, idx := range mapIndices { |
		childResults[idx] = matchAnyResults{ |
			matches:  make([]potentialMatches, len(m)), |
			done:     make([]bool, len(m)), |
			needMore: len(m), |
		} |
	} |
	childInstances := make([]matcherInstance, len(m)) |
	for i, matcher := range m { |
		childInstances[i] = matcher.newInstance(mapIndices) |
	} |
	return &matchAnyInstance{ |
		matchAny:       m, |
		childInstances: childInstances, |
		childResults:   childResults, |
	} |
} |
 |
// getMatchesForLayer implements matcherInstance.
func (m *matchAnyInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (mergedResults []matcherResult, err error) { |
	if len(m.matchAny) == 0 { |
		// return "wild card" results (potentialMatches(nil) is interpreted as a
		// potential match at every log value index of the map).
		mergedResults = make([]matcherResult, len(m.childResults)) |
		var i int |
		for mapIndex := range m.childResults { |
			mergedResults[i] = matcherResult{mapIndex: mapIndex, matches: nil} |
			i++ |
		} |
		return mergedResults, nil |
	} |
	for i, childInstance := range m.childInstances { |
		results, err := childInstance.getMatchesForLayer(ctx, layerIndex) |
		if err != nil { |
			return nil, err |
		} |
		for _, result := range results { |
			mr, ok := m.childResults[result.mapIndex] |
			if !ok || mr.done[i] { |
				continue |
			} |
			mr.done[i] = true |
			mr.matches[i] = result.matches |
			mr.needMore-- |
			if mr.needMore == 0 || result.matches == nil { |
				mergedResults = append(mergedResults, matcherResult{ |
					mapIndex: result.mapIndex, |
					matches:  mergeResults(mr.matches), |
				}) |
				delete(m.childResults, result.mapIndex) |
			} else { |
				m.childResults[result.mapIndex] = mr |
			} |
		} |
	} |
	return mergedResults, nil |
} |
 |
// dropIndices implements matcherInstance.
func (m *matchAnyInstance) dropIndices(dropIndices []uint32) { |
	for _, childInstance := range m.childInstances { |
		childInstance.dropIndices(dropIndices) |
	} |
	for _, mapIndex := range dropIndices { |
		delete(m.childResults, mapIndex) |
	} |
} |
 |
// mergeResults merges multiple lists of matches into a single one, preserving
// ascending order and filtering out any duplicates.
func mergeResults(results []potentialMatches) potentialMatches { |
	if len(results) == 0 { |
		return nil |
	} |
	var sumLen int |
	for _, res := range results { |
		if res == nil { |
			// nil is a wild card; all indices in map range are potential matches
			return nil |
		} |
		sumLen += len(res) |
	} |
	merged := make(potentialMatches, 0, sumLen) |
	for { |
		best := -1 |
		for i, res := range results { |
			if len(res) == 0 { |
				continue |
			} |
			if best < 0 || res[0] < results[best][0] { |
				best = i |
			} |
		} |
		if best < 0 { |
			return merged |
		} |
		if len(merged) == 0 || results[best][0] > merged[len(merged)-1] { |
			merged = append(merged, results[best][0]) |
		} |
		results[best] = results[best][1:] |
	} |
} |
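// Illustrative example, not from the original source: merging two sorted
// match lists with an overlapping element.
//
//	a := potentialMatches{1, 5, 9}
//	b := potentialMatches{5, 7}
//	merged := mergeResults([]potentialMatches{a, b})
//	// merged == potentialMatches{1, 5, 7, 9}; the duplicate 5 is emitted once.
//	// A nil input would instead short-circuit to nil (wild card).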
 |
// matchSequence combines two matchers, a "base" and a "next" matcher with a
// positive integer offset so that the resulting matcher signals a match at log
// value index X when the base matcher returns a match at X and the next matcher
// gives a match at X+offset. Note that matchSequence can be used recursively to
// detect any log value sequence.
type matchSequence struct { |
	params               *Params |
	base, next           matcher |
	offset               uint64 |
	statsLock            sync.Mutex |
	baseStats, nextStats matchOrderStats |
} |
 |
// newInstance creates a new instance of matchSequence.
func (m *matchSequence) newInstance(mapIndices []uint32) matcherInstance { |
	// determine set of indices to request from next matcher
	nextIndices := make([]uint32, 0, len(mapIndices)*3/2) |
	needMatched := make(map[uint32]struct{}) |
	baseRequested := make(map[uint32]struct{}) |
	nextRequested := make(map[uint32]struct{}) |
	for _, mapIndex := range mapIndices { |
		needMatched[mapIndex] = struct{}{} |
		baseRequested[mapIndex] = struct{}{} |
		if _, ok := nextRequested[mapIndex]; !ok { |
			nextIndices = append(nextIndices, mapIndex) |
			nextRequested[mapIndex] = struct{}{} |
		} |
		nextIndices = append(nextIndices, mapIndex+1) |
		nextRequested[mapIndex+1] = struct{}{} |
	} |
	return &matchSequenceInstance{ |
		matchSequence: m, |
		baseInstance:  m.base.newInstance(mapIndices), |
		nextInstance:  m.next.newInstance(nextIndices), |
		needMatched:   needMatched, |
		baseRequested: baseRequested, |
		nextRequested: nextRequested, |
		baseResults:   make(map[uint32]potentialMatches), |
		nextResults:   make(map[uint32]potentialMatches), |
	} |
} |
 |
// matchOrderStats collects statistics about the evaluation cost and the
// occurrence of empty result sets from both base and next child matchers.
// This allows the optimization of the evaluation order by evaluating the
// child first that is cheaper and/or gives empty results more often and not
// evaluating the other child in most cases.
// Note that matchOrderStats is specific to matchSequence and the results are
// carried over to future instances as the results are mostly useful when
// evaluating layer zero of each instance. For this reason it should be used
// in a thread-safe way as it may be accessed from multiple worker goroutines.
type matchOrderStats struct { |
	totalCount, nonEmptyCount, totalCost uint64 |
} |
 |
// add collects statistics after a child has been evaluated for a certain layer.
func (ms *matchOrderStats) add(empty bool, layerIndex uint32) { |
	if empty && layerIndex != 0 { |
		// matchers may be evaluated for higher layers after all results have
		// been returned. Also, empty results are not relevant when previous
		// layers yielded matches already, so these cases can be ignored.
		return |
	} |
	ms.totalCount++ |
	if !empty { |
		ms.nonEmptyCount++ |
	} |
	ms.totalCost += uint64(layerIndex + 1) |
} |
 |
// mergeStats merges two sets of matchOrderStats.
func (ms *matchOrderStats) mergeStats(add matchOrderStats) { |
	ms.totalCount += add.totalCount |
	ms.nonEmptyCount += add.nonEmptyCount |
	ms.totalCost += add.totalCost |
} |
 |
// baseFirst returns true if the base child matcher should be evaluated first.
func (m *matchSequence) baseFirst() bool { |
	m.statsLock.Lock() |
	bf := float64(m.baseStats.totalCost)*float64(m.nextStats.totalCount)+ |
		float64(m.baseStats.nonEmptyCount)*float64(m.nextStats.totalCost) < |
		float64(m.baseStats.totalCost)*float64(m.nextStats.nonEmptyCount)+ |
		float64(m.nextStats.totalCost)*float64(m.baseStats.totalCount) |
	m.statsLock.Unlock() |
	return bf |
} |
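// Illustrative derivation, not from the original source: with average cost
// c = totalCost/totalCount and non-empty probability p = nonEmptyCount/totalCount
// for each child, evaluating base first costs roughly
//
//	cBase + pBase*cNext
//
// while evaluating next first costs
//
//	cNext + pNext*cBase
//
// Multiplying both sides by baseStats.totalCount*nextStats.totalCount clears
// the divisions and yields exactly the float comparison implemented above.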
 |
// mergeBaseStats merges a set of matchOrderStats into the base matcher stats.
func (m *matchSequence) mergeBaseStats(stats matchOrderStats) { |
	m.statsLock.Lock() |
	m.baseStats.mergeStats(stats) |
	m.statsLock.Unlock() |
} |
 |
// mergeNextStats merges a set of matchOrderStats into the next matcher stats.
func (m *matchSequence) mergeNextStats(stats matchOrderStats) { |
	m.statsLock.Lock() |
	m.nextStats.mergeStats(stats) |
	m.statsLock.Unlock() |
} |
 |
// newMatchSequence creates a recursive sequence matcher from a list of underlying
// matchers. The resulting matcher signals a match at log value index X when each
// underlying matcher matchers[i] returns a match at X+i.
func newMatchSequence(params *Params, matchers []matcher) matcher { |
	if len(matchers) == 0 { |
		panic("zero length sequence matchers are not allowed") |
	} |
	if len(matchers) == 1 { |
		return matchers[0] |
	} |
	return &matchSequence{ |
		params: params, |
		base:   newMatchSequence(params, matchers[:len(matchers)-1]), |
		next:   matchers[len(matchers)-1], |
		offset: uint64(len(matchers) - 1), |
	} |
} |
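// Illustrative example, not from the original source: for three matchers
// m0, m1, m2 the recursion builds a left-nested tree, so a match at index X
// requires m0 at X, m1 at X+1 and m2 at X+2:
//
//	newMatchSequence(params, []matcher{m0, m1, m2})
//	// == &matchSequence{base: &matchSequence{base: m0, next: m1, offset: 1},
//	//                   next: m2, offset: 2}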
 |
// matchSequenceInstance is an instance of matchSequence.
type matchSequenceInstance struct { |
	*matchSequence |
	baseInstance, nextInstance                matcherInstance |
	baseRequested, nextRequested, needMatched map[uint32]struct{} |
	baseResults, nextResults                  map[uint32]potentialMatches |
} |
 |
// getMatchesForLayer implements matcherInstance.
func (m *matchSequenceInstance) getMatchesForLayer(ctx context.Context, layerIndex uint32) (matchedResults []matcherResult, err error) { |
	// decide whether to evaluate base or next matcher first
	baseFirst := m.baseFirst() |
	if baseFirst { |
		if err := m.evalBase(ctx, layerIndex); err != nil { |
			return nil, err |
		} |
	} |
	if err := m.evalNext(ctx, layerIndex); err != nil { |
		return nil, err |
	} |
	if !baseFirst { |
		if err := m.evalBase(ctx, layerIndex); err != nil { |
			return nil, err |
		} |
	} |
	// evaluate and return matched results where possible
	for mapIndex := range m.needMatched { |
		if _, ok := m.baseRequested[mapIndex]; ok { |
			continue |
		} |
		if _, ok := m.nextRequested[mapIndex]; ok { |
			continue |
		} |
		if _, ok := m.nextRequested[mapIndex+1]; ok { |
			continue |
		} |
		matchedResults = append(matchedResults, matcherResult{ |
			mapIndex: mapIndex, |
			matches:  m.params.matchResults(mapIndex, m.offset, m.baseResults[mapIndex], m.nextResults[mapIndex], m.nextResults[mapIndex+1]), |
		}) |
		delete(m.needMatched, mapIndex) |
	} |
	return matchedResults, nil |
} |
 |
// dropIndices implements matcherInstance.
func (m *matchSequenceInstance) dropIndices(dropIndices []uint32) { |
	for _, mapIndex := range dropIndices { |
		delete(m.needMatched, mapIndex) |
	} |
	var dropBase, dropNext []uint32 |
	for _, mapIndex := range dropIndices { |
		if m.dropBase(mapIndex) { |
			dropBase = append(dropBase, mapIndex) |
		} |
	} |
	m.baseInstance.dropIndices(dropBase) |
	for _, mapIndex := range dropIndices { |
		if m.dropNext(mapIndex) { |
			dropNext = append(dropNext, mapIndex) |
		} |
		if m.dropNext(mapIndex + 1) { |
			dropNext = append(dropNext, mapIndex+1) |
		} |
	} |
	m.nextInstance.dropIndices(dropNext) |
} |
 |
// evalBase evaluates the base child matcher and drops map indices from the
// next matcher if possible.
func (m *matchSequenceInstance) evalBase(ctx context.Context, layerIndex uint32) error { |
	results, err := m.baseInstance.getMatchesForLayer(ctx, layerIndex) |
	if err != nil { |
		return err |
	} |
	var ( |
		dropIndices []uint32 |
		stats       matchOrderStats |
	) |
	for _, r := range results { |
		m.baseResults[r.mapIndex] = r.matches |
		delete(m.baseRequested, r.mapIndex) |
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex) |
	} |
	m.mergeBaseStats(stats) |
	for _, r := range results { |
		if m.dropNext(r.mapIndex) { |
			dropIndices = append(dropIndices, r.mapIndex) |
		} |
		if m.dropNext(r.mapIndex + 1) { |
			dropIndices = append(dropIndices, r.mapIndex+1) |
		} |
	} |
	if len(dropIndices) > 0 { |
		m.nextInstance.dropIndices(dropIndices) |
	} |
	return nil |
} |
 |
// evalNext evaluates the next child matcher and drops map indices from the
// base matcher if possible.
func (m *matchSequenceInstance) evalNext(ctx context.Context, layerIndex uint32) error { |
	results, err := m.nextInstance.getMatchesForLayer(ctx, layerIndex) |
	if err != nil { |
		return err |
	} |
	var ( |
		dropIndices []uint32 |
		stats       matchOrderStats |
	) |
	for _, r := range results { |
		m.nextResults[r.mapIndex] = r.matches |
		delete(m.nextRequested, r.mapIndex) |
		stats.add(r.matches != nil && len(r.matches) == 0, layerIndex) |
	} |
	m.mergeNextStats(stats) |
	for _, r := range results { |
		if r.mapIndex > 0 && m.dropBase(r.mapIndex-1) { |
			dropIndices = append(dropIndices, r.mapIndex-1) |
		} |
		if m.dropBase(r.mapIndex) { |
			dropIndices = append(dropIndices, r.mapIndex) |
		} |
	} |
	if len(dropIndices) > 0 { |
		m.baseInstance.dropIndices(dropIndices) |
	} |
	return nil |
} |
 |
// dropBase checks whether the given map index can be dropped from the base
// matcher based on the known results from the next matcher. If so, it removes
// the index from the internal requested set and returns true.
func (m *matchSequenceInstance) dropBase(mapIndex uint32) bool { |
	if _, ok := m.baseRequested[mapIndex]; !ok { |
		return false |
	} |
	if _, ok := m.needMatched[mapIndex]; ok { |
		if next := m.nextResults[mapIndex]; next == nil || |
			(len(next) > 0 && next[len(next)-1] >= (uint64(mapIndex)<<m.params.logValuesPerMap)+m.offset) { |
			return false |
		} |
		if nextNext := m.nextResults[mapIndex+1]; nextNext == nil || |
			(len(nextNext) > 0 && nextNext[0] < (uint64(mapIndex+1)<<m.params.logValuesPerMap)+m.offset) { |
			return false |
		} |
	} |
	delete(m.baseRequested, mapIndex) |
	return true |
} |
 |
// dropNext checks whether the given map index can be dropped from the next
// matcher based on the known results from the base matcher. If so, it removes
// the index from the internal requested set and returns true.
func (m *matchSequenceInstance) dropNext(mapIndex uint32) bool { |
	if _, ok := m.nextRequested[mapIndex]; !ok { |
		return false |
	} |
	if _, ok := m.needMatched[mapIndex-1]; ok { |
		if prevBase := m.baseResults[mapIndex-1]; prevBase == nil || |
			(len(prevBase) > 0 && prevBase[len(prevBase)-1]+m.offset >= (uint64(mapIndex)<<m.params.logValuesPerMap)) { |
			return false |
		} |
	} |
	if _, ok := m.needMatched[mapIndex]; ok { |
		if base := m.baseResults[mapIndex]; base == nil || |
			(len(base) > 0 && base[0]+m.offset < (uint64(mapIndex+1)<<m.params.logValuesPerMap)) { |
			return false |
		} |
	} |
	delete(m.nextRequested, mapIndex) |
	return true |
} |
 |
// matchResults returns a list of sequence matches for the given mapIndex and
// offset based on the base matcher's results at mapIndex and the next matcher's
// results at mapIndex and mapIndex+1. Note that acquiring nextNextRes may be
// skipped and it can be substituted with an empty list if baseRes has no potential
// matches that could be sequence matched with anything that could be in nextNextRes.
func (params *Params) matchResults(mapIndex uint32, offset uint64, baseRes, nextRes, nextNextRes potentialMatches) potentialMatches { |
	if nextRes == nil || (baseRes != nil && len(baseRes) == 0) { |
		// if nextRes is a wild card or baseRes is empty then the sequence matcher
		// result equals baseRes.
		return baseRes |
	} |
	if len(nextRes) > 0 { |
		// discard items from nextRes whose corresponding base matcher results
		// with the negative offset applied would be located at mapIndex-1.
		start := 0 |
		for start < len(nextRes) && nextRes[start] < uint64(mapIndex)<<params.logValuesPerMap+offset { |
			start++ |
		} |
		nextRes = nextRes[start:] |
	} |
	if len(nextNextRes) > 0 { |
		// discard items from nextNextRes whose corresponding base matcher results
		// with the negative offset applied would still be located at mapIndex+1.
		stop := 0 |
		for stop < len(nextNextRes) && nextNextRes[stop] < uint64(mapIndex+1)<<params.logValuesPerMap+offset { |
			stop++ |
		} |
		nextNextRes = nextNextRes[:stop] |
	} |
	maxLen := len(nextRes) + len(nextNextRes) |
	if maxLen == 0 { |
		return nextRes |
	} |
	if len(baseRes) < maxLen { |
		maxLen = len(baseRes) |
	} |
	// iterate through baseRes, nextRes and nextNextRes and collect matching results.
	matchedRes := make(potentialMatches, 0, maxLen) |
	for _, nextRes := range []potentialMatches{nextRes, nextNextRes} { |
		if baseRes != nil { |
			for len(nextRes) > 0 && len(baseRes) > 0 { |
				if nextRes[0] > baseRes[0]+offset { |
					baseRes = baseRes[1:] |
				} else if nextRes[0] < baseRes[0]+offset { |
					nextRes = nextRes[1:] |
				} else { |
					matchedRes = append(matchedRes, baseRes[0]) |
					baseRes = baseRes[1:] |
					nextRes = nextRes[1:] |
				} |
			} |
		} else { |
			// baseRes is a wild card so just return next matcher results with
			// negative offset.
			for len(nextRes) > 0 { |
				matchedRes = append(matchedRes, nextRes[0]-offset) |
				nextRes = nextRes[1:] |
			} |
		} |
	} |
	return matchedRes |
} |
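// Illustrative example, not from the original source, assuming
// logValuesPerMap = 4 (map 0 covers indices 0..15, map 1 covers 16..31) and
// offset = 1:
//
//	baseRes     = {3, 7, 15}  // base matches in map 0
//	nextRes     = {4, 12}     // next matches in map 0
//	nextNextRes = {16, 20}    // next matches in map 1; trimming keeps only 16 (< 16+offset)
//	// 3 matches (4 = 3+1 is in nextRes); 7 does not (no 8); 15 matches
//	// (16 = 15+1 is in the trimmed nextNextRes) -> result {3, 15}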
 |
// timeStats collects processing time statistics while searching in the log
// index. Used only when the useTimeStats global flag is true.
type timeStats struct { |
	dt, cnt [stCount]int64 |
} |
 |
const ( |
	stNone = iota |
	stRowCalc |
	stFetchFirst |
	stFetchMore |
	stProcess |
	stOther |
	stCount |
) |
 |
var stNames = []string{"", "rowCalc", "fetchFirst", "fetchMore", "process", "other"} |
 |
// set sets the processing state to one of the pre-defined constants.
// Processing time spent in each state is measured separately.
func (ts *timeStats) set(state *int, newState int) { |
	if !useTimeStats || newState == *state { |
		return |
	} |
	now := int64(mclock.Now()) |
	atomic.AddInt64(&ts.dt[*state], now) |
	atomic.AddInt64(&ts.dt[newState], -now) |
	atomic.AddInt64(&ts.cnt[newState], 1) |
	*state = newState |
} |
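// Illustrative note, not from the original source: dt uses the classic
// "subtract at entry, add at exit" accounting so no per-state start
// timestamp has to be stored:
//
//	dt[old] += now // closes the interval spent in the old state
//	dt[new] -= now // opens the new interval; adding the exit timestamp later
//	               // leaves exactly the elapsed time in dt[new]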
 |
// print prints the collected statistics.
func (ts *timeStats) print() { |
	for i := 1; i < stCount; i++ { |
		log.Info("Matcher stats", "name", stNames[i], "dt", time.Duration(ts.dt[i]), "count", ts.cnt[i]) |
	} |
} |
@ -0,0 +1,199 @@ |
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 |
package filtermaps |
 |
import ( |
	"context" |
	"errors" |
 |
	"github.com/ethereum/go-ethereum/core/types" |
) |
 |
// FilterMapsMatcherBackend implements MatcherBackend.
type FilterMapsMatcherBackend struct { |
	f *FilterMaps |
	// these fields should be accessed under f.matchersLock mutex.
	valid                 bool |
	firstValid, lastValid uint64 |
	syncCh                chan SyncRange |
} |
 |
// NewMatcherBackend returns a FilterMapsMatcherBackend after registering it in
// the active matcher set.
// Note that Close should always be called when the matcher is no longer used.
func (f *FilterMaps) NewMatcherBackend() *FilterMapsMatcherBackend { |
	f.indexLock.RLock() |
	f.matchersLock.Lock() |
	defer func() { |
		f.matchersLock.Unlock() |
		f.indexLock.RUnlock() |
	}() |
 |
	fm := &FilterMapsMatcherBackend{ |
		f:          f, |
		valid:      f.initialized && f.afterLastIndexedBlock > f.firstIndexedBlock, |
		firstValid: f.firstIndexedBlock, |
		lastValid:  f.afterLastIndexedBlock - 1, |
	} |
	f.matchers[fm] = struct{}{} |
	return fm |
} |
 |
// GetParams returns the filtermaps parameters.
// GetParams implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetParams() *Params { |
	return &fm.f.Params |
} |
 |
// Close removes the matcher from the set of active matchers and ensures that
// any SyncLogIndex calls are cancelled.
// Close implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) Close() { |
	fm.f.matchersLock.Lock() |
	defer fm.f.matchersLock.Unlock() |
 |
	delete(fm.f.matchers, fm) |
} |
 |
// GetFilterMapRow returns the given row of the given map. If the row is empty
// then a non-nil zero length row is returned. If baseLayerOnly is true then
// only the first baseRowLength entries of the row are guaranteed to be
// returned.
// Note that the returned slices should not be modified, they should be copied
// on write.
// GetFilterMapRow implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetFilterMapRow(ctx context.Context, mapIndex, rowIndex uint32, baseLayerOnly bool) (FilterRow, error) { |
	return fm.f.getFilterMapRow(mapIndex, rowIndex, baseLayerOnly) |
} |
 |
// GetBlockLvPointer returns the starting log value index where the log values
// generated by the given block are located. If blockNumber is beyond the current
// head then the first unoccupied log value index is returned.
// GetBlockLvPointer implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetBlockLvPointer(ctx context.Context, blockNumber uint64) (uint64, error) { |
	fm.f.indexLock.RLock() |
	defer fm.f.indexLock.RUnlock() |
 |
	return fm.f.getBlockLvPointer(blockNumber) |
} |
 |
// GetLogByLvIndex returns the log at the given log value index.
// Note that this function assumes that the log index structure is consistent
// with the canonical chain at the point where the given log value index points.
// If this is not the case then an invalid result may be returned or certain
// logs might not be returned at all.
// No error is returned for such an inconsistency between the chain and the
// log index, though. It is the caller's responsibility to verify this
// consistency using SyncLogIndex and re-process certain blocks if necessary.
// GetLogByLvIndex implements MatcherBackend.
func (fm *FilterMapsMatcherBackend) GetLogByLvIndex(ctx context.Context, lvIndex uint64) (*types.Log, error) { |
	fm.f.indexLock.RLock() |
	defer fm.f.indexLock.RUnlock() |
 |
	return fm.f.getLogByLvIndex(lvIndex) |
} |
 |
// synced signals to the matcher that triggered a synchronisation that it has
// finished and the log index is consistent with the chain head passed as a
// parameter.
// Note that if the log index head was far behind the chain head then it might not
// be synced up to the given head in a single step. Still, the latest chain head
// should be passed as a parameter and the existing log index should be consistent
// with that chain.
func (fm *FilterMapsMatcherBackend) synced(headNumber uint64) { |
	fm.f.indexLock.RLock() |
	fm.f.matchersLock.Lock() |
	defer func() { |
		fm.f.matchersLock.Unlock() |
		fm.f.indexLock.RUnlock() |
	}() |
 |
	fm.syncCh <- SyncRange{ |
		HeadNumber:   headNumber, |
		Valid:        fm.valid, |
		FirstValid:   fm.firstValid, |
		LastValid:    fm.lastValid, |
		Indexed:      fm.f.hasIndexedBlocks(), |
		FirstIndexed: fm.f.firstIndexedBlock, |
		LastIndexed:  fm.f.afterLastIndexedBlock - 1, |
	} |
	fm.valid = fm.f.hasIndexedBlocks() |
	fm.firstValid = fm.f.firstIndexedBlock |
	fm.lastValid = fm.f.afterLastIndexedBlock - 1 |
	fm.syncCh = nil |
} |
 |
// SyncLogIndex ensures that the log index is consistent with the current state
// of the chain and is synced up to the current head. It blocks until this state
// is achieved or the context is cancelled.
// If successful, it returns a SyncRange that contains the latest chain head,
// the indexed range that is currently consistent with the chain and the valid
// range that has not been changed and has been consistent with all states of the
// chain since the previous SyncLogIndex or the creation of the matcher backend.
func (fm *FilterMapsMatcherBackend) SyncLogIndex(ctx context.Context) (SyncRange, error) { |
	if fm.f.noHistory { |
		if !fm.f.initialized { |
			return SyncRange{}, errors.New("canonical chain head not available") |
		} |
		return SyncRange{HeadNumber: fm.f.targetBlockNumber}, nil |
	} |
	// add SyncRange return channel, buffered so that the result can be
	// delivered without blocking
	syncCh := make(chan SyncRange, 1) |
	fm.f.matchersLock.Lock() |
	fm.syncCh = syncCh |
	fm.f.matchersLock.Unlock() |
 |
	select { |
	case fm.f.matcherSyncCh <- fm: |
	case <-ctx.Done(): |
		return SyncRange{}, ctx.Err() |
	} |
	select { |
	case vr := <-syncCh: |
		return vr, nil |
	case <-ctx.Done(): |
		return SyncRange{}, ctx.Err() |
	} |
} |
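// Illustrative usage sketch, not from the original source:
//
//	mb := f.NewMatcherBackend()
//	defer mb.Close()
//	sr, err := mb.SyncLogIndex(ctx)
//	if err != nil {
//		return err
//	}
//	// matches found in [sr.FirstValid, sr.LastValid] are final; anything
//	// outside must be re-searched or served by another (e.g. legacy) filter.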
 |
// updateMatchersValidRange iterates through active matchers and limits their
// valid range with the current indexed range. This function should be called
// whenever a part of the log index has been removed, before adding new blocks
// to it.
// Note that this function assumes that the index read lock is being held.
func (f *FilterMaps) updateMatchersValidRange() { |
	f.matchersLock.Lock() |
	defer f.matchersLock.Unlock() |
 |
	for fm := range f.matchers { |
		if !f.hasIndexedBlocks() { |
			fm.valid = false |
		} |
		if !fm.valid { |
			continue |
		} |
		if fm.firstValid < f.firstIndexedBlock { |
			fm.firstValid = f.firstIndexedBlock |
		} |
		if fm.lastValid >= f.afterLastIndexedBlock { |
			fm.lastValid = f.afterLastIndexedBlock - 1 |
		} |
		if fm.firstValid > fm.lastValid { |
			fm.valid = false |
		} |
	} |
} |
@ -0,0 +1,87 @@ |
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 |
package filtermaps |
 |
import ( |
	"context" |
	crand "crypto/rand" |
	"math/rand" |
	"testing" |
 |
	"github.com/ethereum/go-ethereum/common" |
) |
 |
func TestMatcher(t *testing.T) { |
	ts := newTestSetup(t) |
	defer ts.close() |
 |
	ts.chain.addBlocks(100, 10, 10, 4, true) |
	ts.setHistory(0, false) |
	ts.fm.WaitIdle() |
 |
	for i := 0; i < 2000; i++ { |
		bhash := ts.chain.canonical[rand.Intn(len(ts.chain.canonical))] |
		receipts := ts.chain.receipts[bhash] |
		if len(receipts) == 0 { |
			continue |
		} |
		receipt := receipts[rand.Intn(len(receipts))] |
		if len(receipt.Logs) == 0 { |
			continue |
		} |
		log := receipt.Logs[rand.Intn(len(receipt.Logs))] |
		var ok bool |
		addresses := make([]common.Address, rand.Intn(3)) |
		for i := range addresses { |
			crand.Read(addresses[i][:]) |
		} |
		if len(addresses) > 0 { |
			addresses[rand.Intn(len(addresses))] = log.Address |
			ok = true |
		} |
		topics := make([][]common.Hash, rand.Intn(len(log.Topics)+1)) |
		for j := range topics { |
			topics[j] = make([]common.Hash, rand.Intn(3)) |
			for i := range topics[j] { |
				crand.Read(topics[j][i][:]) |
			} |
			if len(topics[j]) > 0 { |
				topics[j][rand.Intn(len(topics[j]))] = log.Topics[j] |
				ok = true |
			} |
		} |
		if !ok { |
			continue // cannot search for match-all pattern |
		} |
		mb := ts.fm.NewMatcherBackend() |
		logs, err := GetPotentialMatches(context.Background(), mb, 0, 1000, addresses, topics) |
		mb.Close() |
		if err != nil { |
			t.Fatalf("Log search error: %v", err) |
		} |
		var found bool |
		for _, l := range logs { |
			if l == log { |
				found = true |
				break |
			} |
		} |
		if !found { |
			t.Fatalf("Log search did not return expected log (addresses: %v, topics: %v, expected log: %v)", addresses, topics, *log) |
		} |
	} |
} |
@ -0,0 +1,249 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	"crypto/sha256"
	"encoding/binary"
	"math"
	"sort"

	"github.com/ethereum/go-ethereum/common"
)
// Params defines the basic parameters of the log index structure.
type Params struct {
	logMapHeight       uint // log2(mapHeight)
	logMapsPerEpoch    uint // log2(mapsPerEpoch)
	logValuesPerMap    uint // log2(valuesPerMap)
	baseRowLengthRatio uint // baseRowLength / average row length
	logLayerDiff       uint // maxRowLength log2 growth per layer
	// derived fields
	mapHeight     uint32 // filter map height (number of rows)
	mapsPerEpoch  uint32 // number of maps in an epoch
	baseRowLength uint32 // maximum number of log values per row on layer 0
	valuesPerMap  uint64 // number of log values marked on each filter map
	// not affecting consensus
	baseRowGroupLength uint32 // length of base row groups in local database
}

// DefaultParams is the set of parameters used on mainnet.
var DefaultParams = Params{
	logMapHeight:       16,
	logMapsPerEpoch:    10,
	logValuesPerMap:    16,
	baseRowGroupLength: 32,
	baseRowLengthRatio: 8,
	logLayerDiff:       2,
}

// deriveFields calculates the derived fields of the parameter set.
func (p *Params) deriveFields() {
	p.mapHeight = uint32(1) << p.logMapHeight
	p.mapsPerEpoch = uint32(1) << p.logMapsPerEpoch
	p.valuesPerMap = uint64(1) << p.logValuesPerMap
	p.baseRowLength = uint32(p.valuesPerMap * uint64(p.baseRowLengthRatio) / uint64(p.mapHeight))
}
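// Worked example (illustrative): deriving the fields of DefaultParams.
//
//	p := DefaultParams
//	p.deriveFields()
//	// p.mapHeight     == 1 << 16 == 65536 rows per map
//	// p.mapsPerEpoch  == 1 << 10 == 1024 maps per epoch
//	// p.valuesPerMap  == 1 << 16 == 65536 log values per map
//	// p.baseRowLength == 65536 * 8 / 65536 == 8 entries per row on layer 0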
// addressValue returns the log value hash of a log emitting address.
func addressValue(address common.Address) common.Hash {
	var result common.Hash
	hasher := sha256.New()
	hasher.Write(address[:])
	hasher.Sum(result[:0])
	return result
}

// topicValue returns the log value hash of a log topic.
func topicValue(topic common.Hash) common.Hash {
	var result common.Hash
	hasher := sha256.New()
	hasher.Write(topic[:])
	hasher.Sum(result[:0])
	return result
}
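// Minimal sketch (illustrative; log is a hypothetical *types.Log): one log
// value hash is derived from the emitting address and one from each topic.
//
//	values := []common.Hash{addressValue(log.Address)}
//	for _, topic := range log.Topics {
//		values = append(values, topicValue(topic))
//	}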
// rowIndex returns the row index in which the given log value should be marked
// on the given map and mapping layer. Note that row assignments are re-shuffled
// with a different frequency on each mapping layer, allowing efficient disk
// access and Merkle proofs for long sections of short rows on lower order
// layers while avoiding putting too many heavy rows next to each other on
// higher order layers.
func (p *Params) rowIndex(mapIndex, layerIndex uint32, logValue common.Hash) uint32 {
	hasher := sha256.New()
	hasher.Write(logValue[:])
	var indexEnc [8]byte
	binary.LittleEndian.PutUint32(indexEnc[0:4], p.maskedMapIndex(mapIndex, layerIndex))
	binary.LittleEndian.PutUint32(indexEnc[4:8], layerIndex)
	hasher.Write(indexEnc[:])
	var hash common.Hash
	hasher.Sum(hash[:0])
	return binary.LittleEndian.Uint32(hash[:4]) % p.mapHeight
}
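// Note (illustrative consequence of the masking): on layer 0 the row index
// depends only on the epoch of mapIndex (see maskedMapIndex below), so a
// given log value stays in the same row throughout an epoch, which is what
// makes base rows storable in groups (see baseRowGroupLength above).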
// columnIndex returns the column index that should be added to the appropriate
// row in order to place a mark for the next log value.
func (p *Params) columnIndex(lvIndex uint64, logValue common.Hash) uint32 {
	x := uint32(lvIndex % p.valuesPerMap) // log value sub-index
	transformHash := transformHash(uint32(lvIndex/p.valuesPerMap), logValue)
	// apply column index transformation function
	x += binary.LittleEndian.Uint32(transformHash[0:4])
	x *= binary.LittleEndian.Uint32(transformHash[4:8])*2 + 1
	x ^= binary.LittleEndian.Uint32(transformHash[8:12])
	x *= binary.LittleEndian.Uint32(transformHash[12:16])*2 + 1
	x += binary.LittleEndian.Uint32(transformHash[16:20])
	x *= binary.LittleEndian.Uint32(transformHash[20:24])*2 + 1
	x ^= binary.LittleEndian.Uint32(transformHash[24:28])
	x *= binary.LittleEndian.Uint32(transformHash[28:32])*2 + 1
	return x
}
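// Round-trip sketch (illustrative): every step above (addition, xor,
// multiplication by an odd factor) is invertible mod 2^32, so the sub-index
// can be recovered; potentialMatches below applies exactly this inverse chain.
//
//	ci := p.columnIndex(lvIndex, logValue)
//	// undoing the steps in reverse order (multiply by the modular inverse,
//	// xor, subtract) recovers uint32(lvIndex % p.valuesPerMap) from ci.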
// maxRowLength returns the maximum length filter rows are populated up to
// when using the given mapping layer. A log value can be marked on the map
// according to a given mapping layer if the row mapping on that layer points
// to a row that has not yet reached the maxRowLength belonging to that layer.
// This means that a row that is considered full on a given layer may still be
// extended further on a higher order layer.
// Each value is marked on the lowest order layer possible, assuming that marks
// are added in ascending log value index order.
// When searching for a log value one should consider all layers and process
// corresponding rows up until the first one where the row mapped to the given
// layer is not full.
func (p *Params) maxRowLength(layerIndex uint32) uint32 {
	logLayerDiff := uint(layerIndex) * p.logLayerDiff
	if logLayerDiff > p.logMapsPerEpoch {
		logLayerDiff = p.logMapsPerEpoch
	}
	return p.baseRowLength << logLayerDiff
}
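// Worked example (illustrative, using DefaultParams where baseRowLength == 8,
// logLayerDiff == 2 and logMapsPerEpoch == 10): layer 0 rows hold up to 8
// entries, layer 1 up to 8 << 2 == 32, layer 2 up to 8 << 4 == 128, and from
// layer 5 on the shift is capped at 10, giving 8 << 10 == 8192.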
// maskedMapIndex returns the index used for row mapping calculation on the
// given layer. On layer zero the mapping changes once per epoch, then the
// frequency of re-mapping increases with every new layer until it reaches
// the frequency where it is different for every mapIndex.
func (p *Params) maskedMapIndex(mapIndex, layerIndex uint32) uint32 {
	logLayerDiff := uint(layerIndex) * p.logLayerDiff
	if logLayerDiff > p.logMapsPerEpoch {
		logLayerDiff = p.logMapsPerEpoch
	}
	return mapIndex & (uint32(math.MaxUint32) << (p.logMapsPerEpoch - logLayerDiff))
}
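// Worked example (illustrative, using DefaultParams): on layer 0 the low 10
// bits of mapIndex are masked off, so all 1024 maps of an epoch share one row
// mapping; on layer 1 only the low 8 bits are masked (one mapping per 256
// maps); from layer 5 on the mask is a no-op and every map is remapped.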
// transformHash calculates a hash specific to a given map and log value hash
// that defines a bijective function on the uint32 range. This function is used
// to transform the log value sub-index (distance from the first index of the map)
// into a 32 bit column index, then applied in reverse when searching for potential
// matches for a given log value.
func transformHash(mapIndex uint32, logValue common.Hash) (result common.Hash) {
	hasher := sha256.New()
	hasher.Write(logValue[:])
	var indexEnc [4]byte
	binary.LittleEndian.PutUint32(indexEnc[:], mapIndex)
	hasher.Write(indexEnc[:])
	hasher.Sum(result[:0])
	return
}
// potentialMatches returns the list of log value indices potentially matching
// the given log value hash in the range of the filter map the row belongs to.
// Note that the returned list is always sorted and duplicates are removed.
// Though the column indices are stored in the order they were added, so true
// matches would come out of the reverse transform already ordered, false
// positives can break this property. Since false positives can only be
// separated from true matches after the combined pattern matching of the
// outputs of the individual log value matchers, and that pattern matcher
// assumes a sorted and duplicate-free list of indices, we ensure these
// properties here.
func (p *Params) potentialMatches(rows []FilterRow, mapIndex uint32, logValue common.Hash) potentialMatches {
	results := make(potentialMatches, 0, 8)
	transformHash := transformHash(mapIndex, logValue)
	sub1 := binary.LittleEndian.Uint32(transformHash[0:4])
	mul1 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[4:8])*2 + 1)
	xor1 := binary.LittleEndian.Uint32(transformHash[8:12])
	mul2 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[12:16])*2 + 1)
	sub2 := binary.LittleEndian.Uint32(transformHash[16:20])
	mul3 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[20:24])*2 + 1)
	xor2 := binary.LittleEndian.Uint32(transformHash[24:28])
	mul4 := uint32ModInverse(binary.LittleEndian.Uint32(transformHash[28:32])*2 + 1)
	// Perform the reverse column index transformation on all column indices
	// of the row. If a column index was added by the searched log value then
	// the reverse transform will yield a valid log value sub-index of the
	// given map.
	// Column index is 32 bits long while there are 2**16 valid log value
	// indices in the map's range, so this can also happen by accident with
	// 1 in 2**16 chance, in which case we have a false positive.
	for i, row := range rows {
		for _, columnIndex := range row {
			if potentialSubIndex := (((((((columnIndex * mul4) ^ xor2) * mul3) - sub2) * mul2) ^ xor1) * mul1) - sub1; potentialSubIndex < uint32(p.valuesPerMap) {
				results = append(results, uint64(mapIndex)<<p.logValuesPerMap+uint64(potentialSubIndex))
			}
		}
		if uint32(len(row)) < p.maxRowLength(uint32(i)) {
			break
		}
		if i == len(rows)-1 {
			panic("potentialMatches: insufficient list of row alternatives")
		}
	}
	sort.Sort(results)
	// remove duplicates
	j := 0
	for i, match := range results {
		if i == 0 || match != results[i-1] {
			results[j] = results[i]
			j++
		}
	}
	return results[:j]
}
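// Usage sketch (illustrative): the caller passes the row alternatives of the
// same map on consecutive layers; rowLayer0, rowLayer1 and addr are
// hypothetical.
//
//	rows := []FilterRow{rowLayer0, rowLayer1}
//	matches := p.potentialMatches(rows, mapIndex, addressValue(addr))
//	// matches is sorted and duplicate-free, but may still contain false
//	// positives that later pattern matching has to filter out.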
// potentialMatches is a strictly monotonically increasing list of log value
// indices in the range of a filter map that are potential matches for certain
// filter criteria.
// potentialMatches implements sort.Interface.
// Note that nil is used as a wildcard and therefore means that all log value
// indices in the filter map range are potential matches. If there are no
// potential matches in the given map's range then an empty slice should be used.
type potentialMatches []uint64

// noMatches means there are no potential matches in a given filter map's range.
var noMatches = potentialMatches{}

func (p potentialMatches) Len() int           { return len(p) }
func (p potentialMatches) Less(i, j int) bool { return p[i] < p[j] }
func (p potentialMatches) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
// uint32ModInverse takes an odd 32 bit number and returns its modular
// multiplicative inverse (mod 2**32), meaning that for any odd uint32 value v
// uint32(v * uint32ModInverse(v)) == 1.
func uint32ModInverse(v uint32) uint32 {
	if v&1 == 0 {
		panic("uint32ModInverse called with even argument")
	}
	m := int64(1) << 32
	m0 := m
	a := int64(v)
	x, y := int64(1), int64(0)
	for a > 1 {
		q := a / m
		m, a = a%m, m
		x, y = y, x-q*y
	}
	if x < 0 {
		x += m0
	}
	return uint32(x)
}
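// Sanity sketch (illustrative): for any odd v the product wraps around to 1
// in uint32 arithmetic.
//
//	v := uint32(0x12345679)
//	inv := uint32ModInverse(v)
//	// v * inv == 1 (mod 2^32)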
@ -0,0 +1,149 @@
// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package filtermaps

import (
	crand "crypto/rand"
	"math/rand"
	"testing"

	"github.com/ethereum/go-ethereum/common"
)
func TestSingleMatch(t *testing.T) {
	params := DefaultParams
	params.deriveFields()

	for count := 0; count < 100000; count++ {
		// generate a row with a single random entry
		mapIndex := rand.Uint32()
		lvIndex := uint64(mapIndex)<<params.logValuesPerMap + uint64(rand.Intn(int(params.valuesPerMap)))
		var lvHash common.Hash
		crand.Read(lvHash[:])
		row := FilterRow{params.columnIndex(lvIndex, lvHash)}
		matches := params.potentialMatches([]FilterRow{row}, mapIndex, lvHash)
		// check if it has been reverse transformed correctly
		if len(matches) != 1 {
			t.Fatalf("Invalid length of matches (got %d, expected 1)", len(matches))
		}
		if matches[0] != lvIndex {
			t.Fatalf("Incorrect match returned (got %d, expected %d)", matches[0], lvIndex)
		}
	}
}
const (
	testPmCount = 100
	testPmLen   = 1000
)
func TestPotentialMatches(t *testing.T) {
	params := DefaultParams
	params.deriveFields()

	var falsePositives int
	for count := 0; count < testPmCount; count++ {
		mapIndex := rand.Uint32()
		lvStart := uint64(mapIndex) << params.logValuesPerMap
		var row FilterRow
		lvIndices := make([]uint64, testPmLen)
		lvHashes := make([]common.Hash, testPmLen+1)
		for i := range lvIndices {
			// add testPmLen single entries with different log value hashes at different indices
			lvIndices[i] = lvStart + uint64(rand.Intn(int(params.valuesPerMap)))
			crand.Read(lvHashes[i][:])
			row = append(row, params.columnIndex(lvIndices[i], lvHashes[i]))
		}
		// add the same log value hash at the first testPmLen log value indices of the map's range
		crand.Read(lvHashes[testPmLen][:])
		for lvIndex := lvStart; lvIndex < lvStart+testPmLen; lvIndex++ {
			row = append(row, params.columnIndex(lvIndex, lvHashes[testPmLen]))
		}
		// randomly duplicate some entries
		for i := 0; i < testPmLen; i++ {
			row = append(row, row[rand.Intn(len(row))])
		}
		// randomly mix up order of elements
		for i := len(row) - 1; i > 0; i-- {
			j := rand.Intn(i)
			row[i], row[j] = row[j], row[i]
		}
		// split up into a list of rows if longer than allowed
		var rows []FilterRow
		for layerIndex := uint32(0); row != nil; layerIndex++ {
			maxLen := int(params.maxRowLength(layerIndex))
			if len(row) > maxLen {
				rows = append(rows, row[:maxLen])
				row = row[maxLen:]
			} else {
				rows = append(rows, row)
				row = nil
			}
		}
		// check retrieved matches while also counting false positives
		for i, lvHash := range lvHashes {
			matches := params.potentialMatches(rows, mapIndex, lvHash)
			if i < testPmLen {
				// check single entry match
				if len(matches) < 1 {
					t.Fatalf("Invalid length of matches (got %d, expected >=1)", len(matches))
				}
				var found bool
				for _, lvi := range matches {
					if lvi == lvIndices[i] {
						found = true
					} else {
						falsePositives++
					}
				}
				if !found {
					t.Fatalf("Expected match not found (got %v, expected %d)", matches, lvIndices[i])
				}
			} else {
				// check "long series" match
				if len(matches) < testPmLen {
					t.Fatalf("Invalid length of matches (got %d, expected >=%d)", len(matches), testPmLen)
				}
				// since results are ordered, first testPmLen entries should always match exactly
				for j := 0; j < testPmLen; j++ {
					if matches[j] != lvStart+uint64(j) {
						t.Fatalf("Incorrect match at index %d (got %d, expected %d)", j, matches[j], lvStart+uint64(j))
					}
				}
				// the rest are false positives
				falsePositives += len(matches) - testPmLen
			}
		}
	}
	// Whenever looking for a certain log value hash, each entry in the row that
	// was generated by another log value hash (a "foreign entry") has a
	// valuesPerMap / 2^32 chance of yielding a false positive if the reverse
	// transformed 32 bit integer is by random chance less than valuesPerMap and
	// is therefore considered a potentially valid match.
	// We have testPmLen unique hash entries and a testPmLen long series of entries
	// for the same hash. For each of the testPmLen unique hash entries there are
	// testPmLen*2-1 foreign entries, while for the long series there are testPmLen
	// foreign entries. This means that after performing all these filtering runs,
	// we have processed 2*testPmLen^2 foreign entries, which gives us an estimate
	// of how many false positives to expect.
	expFalse := int(uint64(testPmCount*testPmLen*testPmLen*2) * params.valuesPerMap >> 32)
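	// With DefaultParams (valuesPerMap == 2^16) this evaluates to
	// 100 * 2 * 1000 * 1000 * 2^16 / 2^32 == 2*10^8 / 2^16 ≈ 3051
	// expected false positives over the whole test.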
	if falsePositives < expFalse/2 || falsePositives > expFalse*3/2 {
		t.Fatalf("False positive rate out of expected range (got %d, expected %d +-50%%)", falsePositives, expFalse)
	}
}