|
|
|
// Copyright 2018 The go-ethereum Authors
|
|
|
|
// This file is part of the go-ethereum library.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
|
|
// (at your option) any later version.
|
|
|
|
//
|
|
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
// GNU Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
package rawdb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/binary"
|
|
|
|
"fmt"
|
|
|
|
"math/big"
|
|
|
|
"sort"
|
|
|
|
|
|
|
|
"github.com/ethereum/go-ethereum/common"
|
|
|
|
"github.com/ethereum/go-ethereum/core/types"
|
|
|
|
"github.com/ethereum/go-ethereum/crypto"
|
|
|
|
"github.com/ethereum/go-ethereum/ethdb"
|
|
|
|
"github.com/ethereum/go-ethereum/log"
|
|
|
|
"github.com/ethereum/go-ethereum/params"
|
|
|
|
"github.com/ethereum/go-ethereum/rlp"
|
|
|
|
)
|
|
|
|
|
|
|
|
// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
|
|
|
|
data, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
data, _ = db.Get(headerHashKey(number))
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
if len(data) == 0 {
|
|
|
|
data, _ = db.Ancient(freezerHashTable, number)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(data) == 0 {
|
|
|
|
return common.Hash{}
|
|
|
|
}
|
|
|
|
return common.BytesToHash(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteCanonicalHash stores the hash assigned to a canonical block number.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
|
|
|
|
log.Crit("Failed to store number to hash mapping", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteCanonicalHash removes the number to hash canonical mapping.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
|
|
|
|
if err := db.Delete(headerHashKey(number)); err != nil {
|
|
|
|
log.Crit("Failed to delete number to hash mapping", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
// ReadAllHashes retrieves all the hashes assigned to blocks at a certain heights,
|
|
|
|
// both canonical and reorged forks included.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
|
|
|
|
prefix := headerKeyPrefix(number)
|
|
|
|
|
|
|
|
hashes := make([]common.Hash, 0, 1)
|
|
|
|
it := db.NewIterator(prefix, nil)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
defer it.Release()
|
|
|
|
|
|
|
|
for it.Next() {
|
|
|
|
if key := it.Key(); len(key) == len(prefix)+32 {
|
|
|
|
hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return hashes
|
|
|
|
}
|
|
|
|
|
|
|
|
// NumberHash is a (block number, block hash) pair, used for returning
// multiple mappings from a single range query.
type NumberHash struct {
	Number uint64
	Hash   common.Hash
}
|
|
|
|
|
|
|
|
// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		// Header keys are prefix + 8 byte big-endian number + 32 byte hash;
		// anything of a different length is some other kind of entry.
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		// Keys iterate in ascending number order, so passing `last` means done.
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}
|
|
|
|
|
|
|
|
// ReadAllCanonicalHashes retrieves all canonical number and hash mappings at the
// certain chain range. If the accumulated entries reaches the given threshold,
// abort the iteration and return the semi-finish result.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		// Keys iterate in ascending order; stop once past the `to` bound
		// (the end key itself is excluded, making `to` exclusive here).
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		// Canonical mappings are prefix + 8 byte number + 1 byte hash suffix.
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reaches the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}
|
|
|
|
|
|
|
|
// ReadHeaderNumber returns the header number assigned to a hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
|
|
|
|
data, _ := db.Get(headerNumberKey(hash))
|
|
|
|
if len(data) != 8 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
number := binary.BigEndian.Uint64(data)
|
|
|
|
return &number
|
|
|
|
}
|
|
|
|
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
6 years ago
|
|
|
// WriteHeaderNumber stores the hash->number mapping.
|
|
|
|
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
key := headerNumberKey(hash)
|
|
|
|
enc := encodeBlockNumber(number)
|
|
|
|
if err := db.Put(key, enc); err != nil {
|
|
|
|
log.Crit("Failed to store hash to number mapping", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteHeaderNumber removes hash->number mapping.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
|
if err := db.Delete(headerNumberKey(hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete hash to number mapping", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
|
data, _ := db.Get(headHeaderKey)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return common.Hash{}
|
|
|
|
}
|
|
|
|
return common.BytesToHash(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteHeadHeaderHash stores the hash of the current canonical head header.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
|
if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
|
|
|
|
log.Crit("Failed to store last header's hash", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
|
data, _ := db.Get(headBlockKey)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return common.Hash{}
|
|
|
|
}
|
|
|
|
return common.BytesToHash(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteHeadBlockHash stores the head block's hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
|
if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
|
|
|
|
log.Crit("Failed to store last block's hash", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
|
|
|
|
data, _ := db.Get(headFastBlockKey)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return common.Hash{}
|
|
|
|
}
|
|
|
|
return common.BytesToHash(data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
|
|
|
|
if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
|
|
|
|
log.Crit("Failed to store last fast block's hash", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
|
|
|
|
// full synced, the last pivot will always be nil.
|
|
|
|
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
|
|
|
|
data, _ := db.Get(lastPivotKey)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var pivot uint64
|
|
|
|
if err := rlp.DecodeBytes(data, &pivot); err != nil {
|
|
|
|
log.Error("Invalid pivot block number in database", "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &pivot
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteLastPivotNumber stores the number of the last pivot block.
|
|
|
|
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
|
|
|
|
enc, err := rlp.EncodeToBytes(pivot)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to encode pivot block number", "err", err)
|
|
|
|
}
|
|
|
|
if err := db.Put(lastPivotKey, enc); err != nil {
|
|
|
|
log.Crit("Failed to store pivot block number", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
|
|
|
|
// reporting correct numbers across restarts.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
|
|
|
|
data, _ := db.Get(fastTrieProgressKey)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return new(big.Int).SetBytes(data).Uint64()
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteFastTrieProgress stores the fast sync trie process counter to support
|
|
|
|
// retrieving it across restarts.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
|
|
|
|
if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
|
|
|
|
log.Crit("Failed to store fast sync trie progress", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadTxIndexTail retrieves the number of oldest indexed block
|
|
|
|
// whose transaction indices has been indexed. If the corresponding entry
|
|
|
|
// is non-existent in database it means the indexing has been finished.
|
|
|
|
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
|
|
|
|
data, _ := db.Get(txIndexTailKey)
|
|
|
|
if len(data) != 8 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
number := binary.BigEndian.Uint64(data)
|
|
|
|
return &number
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTxIndexTail stores the number of oldest indexed block
|
|
|
|
// into database.
|
|
|
|
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
|
|
|
|
if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
|
|
|
|
log.Crit("Failed to store the transaction index tail", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
|
|
|
|
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
|
|
|
|
data, _ := db.Get(fastTxLookupLimitKey)
|
|
|
|
if len(data) != 8 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
number := binary.BigEndian.Uint64(data)
|
|
|
|
return &number
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into database.
|
|
|
|
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
|
|
|
|
if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
|
|
|
|
log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
|
// First try to look up the data in ancient database. Extra hash
|
|
|
|
// comparison is necessary since ancient database only maintains
|
|
|
|
// the canonical data.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
data, _ := db.Ancient(freezerHeaderTable, number)
|
|
|
|
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
// Then try to look up the data in leveldb.
|
|
|
|
data, _ = db.Get(headerKey(number, hash))
|
|
|
|
if len(data) > 0 {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
data, _ = db.Ancient(freezerHeaderTable, number)
|
|
|
|
if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
return nil // Can't find the data anywhere.
|
|
|
|
}
|
|
|
|
|
|
|
|
// HasHeader verifies the existence of a block header corresponding to the hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
|
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeader retrieves the block header corresponding to the hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
|
|
|
|
data := ReadHeaderRLP(db, hash, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
header := new(types.Header)
|
|
|
|
if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
|
|
|
|
log.Error("Invalid block header RLP", "hash", hash, "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return header
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteHeader stores a block header into the database and also stores the hash-
|
|
|
|
// to-number mapping.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
|
|
|
|
var (
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
6 years ago
|
|
|
hash = header.Hash()
|
|
|
|
number = header.Number.Uint64()
|
|
|
|
)
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
6 years ago
|
|
|
// Write the hash -> number mapping
|
|
|
|
WriteHeaderNumber(db, hash, number)
|
|
|
|
|
|
|
|
// Write the encoded header
|
|
|
|
data, err := rlp.EncodeToBytes(header)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to RLP encode header", "err", err)
|
|
|
|
}
|
core, cmd, vendor: fixes and database inspection tool (#15)
* core, eth: some fixes for freezer
* vendor, core/rawdb, cmd/geth: add db inspector
* core, cmd/utils: check ancient store path forceily
* cmd/geth, common, core/rawdb: a few fixes
* cmd/geth: support windows file rename and fix rename error
* core: support ancient plugin
* core, cmd: streaming file copy
* cmd, consensus, core, tests: keep genesis in leveldb
* core: write txlookup during ancient init
* core: bump database version
6 years ago
|
|
|
key := headerKey(number, hash)
|
|
|
|
if err := db.Put(key, data); err != nil {
|
|
|
|
log.Crit("Failed to store header", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteHeader removes all block header data associated with a hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
deleteHeaderWithoutNumber(db, hash, number)
|
|
|
|
if err := db.Delete(headerNumberKey(hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete hash to number mapping", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// deleteHeaderWithoutNumber removes only the block header but does not remove
|
|
|
|
// the hash to number mapping.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
if err := db.Delete(headerKey(number, hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete header", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
|
// First try to look up the data in ancient database. Extra hash
|
|
|
|
// comparison is necessary since ancient database only maintains
|
|
|
|
// the canonical data.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
data, _ := db.Ancient(freezerBodiesTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Then try to look up the data in leveldb.
|
|
|
|
data, _ = db.Get(blockBodyKey(number, hash))
|
|
|
|
if len(data) > 0 {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
data, _ = db.Ancient(freezerBodiesTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil // Can't find the data anywhere.
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
|
|
|
|
// block at number, in RLP encoding.
|
|
|
|
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
|
|
|
|
// If it's an ancient one, we don't need the canonical hash
|
|
|
|
data, _ := db.Ancient(freezerBodiesTable, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
// Need to get the hash
|
|
|
|
data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
|
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
if len(data) == 0 {
|
|
|
|
data, _ = db.Ancient(freezerBodiesTable, number)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteBodyRLP stores an RLP encoded block body into the database.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
|
|
|
|
if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
|
|
|
|
log.Crit("Failed to store block body", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// HasBody verifies the existence of a block body corresponding to the hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
|
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadBody retrieves the block body corresponding to the hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
|
|
|
|
data := ReadBodyRLP(db, hash, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
body := new(types.Body)
|
|
|
|
if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
|
|
|
|
log.Error("Invalid block body RLP", "hash", hash, "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return body
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteBody stores a block body into the database.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
|
|
|
|
data, err := rlp.EncodeToBytes(body)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to RLP encode body", "err", err)
|
|
|
|
}
|
|
|
|
WriteBodyRLP(db, hash, number, data)
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBody removes all block body data associated with a hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
if err := db.Delete(blockBodyKey(number, hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete block body", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
|
// First try to look up the data in ancient database. Extra hash
|
|
|
|
// comparison is necessary since ancient database only maintains
|
|
|
|
// the canonical data.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
data, _ := db.Ancient(freezerDifficultyTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Then try to look up the data in leveldb.
|
|
|
|
data, _ = db.Get(headerTDKey(number, hash))
|
|
|
|
if len(data) > 0 {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
data, _ = db.Ancient(freezerDifficultyTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil // Can't find the data anywhere.
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadTd retrieves a block's total difficulty corresponding to the hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
|
|
|
|
data := ReadTdRLP(db, hash, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
td := new(big.Int)
|
|
|
|
if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
|
|
|
|
log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return td
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteTd stores the total difficulty of a block into the database.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
|
|
|
|
data, err := rlp.EncodeToBytes(td)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to RLP encode block total difficulty", "err", err)
|
|
|
|
}
|
|
|
|
if err := db.Put(headerTDKey(number, hash), data); err != nil {
|
|
|
|
log.Crit("Failed to store block total difficulty", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteTd removes all block total difficulty data associated with a hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
if err := db.Delete(headerTDKey(number, hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete block total difficulty", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// HasReceipts verifies the existence of all the transaction receipts belonging
|
|
|
|
// to a block.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
|
|
|
|
if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
|
|
|
|
// First try to look up the data in ancient database. Extra hash
|
|
|
|
// comparison is necessary since ancient database only maintains
|
|
|
|
// the canonical data.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
data, _ := db.Ancient(freezerReceiptTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Then try to look up the data in leveldb.
|
|
|
|
data, _ = db.Get(blockReceiptsKey(number, hash))
|
|
|
|
if len(data) > 0 {
|
|
|
|
return data
|
|
|
|
}
|
|
|
|
// In the background freezer is moving data from leveldb to flatten files.
|
|
|
|
// So during the first check for ancient db, the data is not yet in there,
|
|
|
|
// but when we reach into leveldb, the data was already moved. That would
|
|
|
|
// result in a not found error.
|
|
|
|
data, _ = db.Ancient(freezerReceiptTable, number)
|
|
|
|
if len(data) > 0 {
|
|
|
|
h, _ := db.Ancient(freezerHashTable, number)
|
|
|
|
if common.BytesToHash(h) == hash {
|
|
|
|
return data
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil // Can't find the data anywhere.
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
|
|
|
|
// The receipt metadata fields are not guaranteed to be populated, so they
|
|
|
|
// should not be used. Use ReadReceipts instead if the metadata is needed.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
|
|
|
|
// Retrieve the flattened receipt slice
|
|
|
|
data := ReadReceiptsRLP(db, hash, number)
|
|
|
|
if len(data) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// Convert the receipts from their storage form to their internal representation
|
|
|
|
storageReceipts := []*types.ReceiptForStorage{}
|
|
|
|
if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
|
|
|
|
log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
receipts := make(types.Receipts, len(storageReceipts))
|
|
|
|
for i, storageReceipt := range storageReceipts {
|
|
|
|
receipts[i] = (*types.Receipt)(storageReceipt)
|
|
|
|
}
|
|
|
|
return receipts
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
|
|
|
|
// its correspoinding metadata fields. If it is unable to populate these metadata
|
|
|
|
// fields then nil is returned.
|
|
|
|
//
|
|
|
|
// The current implementation populates these metadata fields by reading the receipts'
|
|
|
|
// corresponding block body, so if the block body is not found it will return nil even
|
|
|
|
// if the receipt itself is stored.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
|
|
|
|
// We're deriving many fields from the block body, retrieve beside the receipt
|
|
|
|
receipts := ReadRawReceipts(db, hash, number)
|
|
|
|
if receipts == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
body := ReadBody(db, hash, number)
|
|
|
|
if body == nil {
|
|
|
|
log.Error("Missing body but have receipt", "hash", hash, "number", number)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
|
|
|
|
log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return receipts
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteReceipts stores all the transaction receipts belonging to a block.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
|
|
|
|
// Convert the receipts into their storage form and serialize them
|
|
|
|
storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
|
|
|
|
for i, receipt := range receipts {
|
|
|
|
storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
|
|
|
|
}
|
|
|
|
bytes, err := rlp.EncodeToBytes(storageReceipts)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to encode block receipts", "err", err)
|
|
|
|
}
|
|
|
|
// Store the flattened receipt slice
|
|
|
|
if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
|
|
|
|
log.Crit("Failed to store block receipts", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteReceipts removes all receipt data associated with a block hash.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
|
|
|
|
if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
|
|
|
|
log.Crit("Failed to delete block receipts", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadBlock retrieves an entire block corresponding to the hash, assembling it
|
|
|
|
// back from the stored header and body. If either the header or body could not
|
|
|
|
// be retrieved nil is returned.
|
|
|
|
//
|
|
|
|
// Note, due to concurrent download of header and block body the header and thus
|
|
|
|
// canonical hash can be stored in the database but the body data not (yet).
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
|
|
|
|
header := ReadHeader(db, hash, number)
|
|
|
|
if header == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
body := ReadBody(db, hash, number)
|
|
|
|
if body == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteBlock serializes a block into the database, header and body separately.
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
|
|
|
|
WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
|
|
|
|
WriteHeader(db, block.Header())
|
|
|
|
}
|
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
// WriteAncientBlock writes entire block data into ancient store and returns the total written size.
|
|
|
|
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
|
|
|
|
var (
|
|
|
|
tdSum = new(big.Int).Set(td)
|
|
|
|
stReceipts []*types.ReceiptForStorage
|
|
|
|
)
|
|
|
|
return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
|
|
|
|
for i, block := range blocks {
|
|
|
|
// Convert receipts to storage format and sum up total difficulty.
|
|
|
|
stReceipts = stReceipts[:0]
|
|
|
|
for _, receipt := range receipts[i] {
|
|
|
|
stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
|
|
|
|
}
|
|
|
|
header := block.Header()
|
|
|
|
if i > 0 {
|
|
|
|
tdSum.Add(tdSum, header.Difficulty)
|
|
|
|
}
|
|
|
|
if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
|
|
|
|
num := block.NumberU64()
|
|
|
|
if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
|
|
|
|
return fmt.Errorf("can't add block %d hash: %v", num, err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
if err := op.Append(freezerHeaderTable, num, header); err != nil {
|
|
|
|
return fmt.Errorf("can't append block header %d: %v", num, err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
|
|
|
|
return fmt.Errorf("can't append block body %d: %v", num, err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
|
|
|
|
return fmt.Errorf("can't append block %d receipts: %v", num, err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
if err := op.Append(freezerDifficultyTable, num, td); err != nil {
|
|
|
|
return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
return nil
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	// Receipts are removed first, then header, body and total difficulty.
	// NOTE(review): the deletion order looks deliberate (data before the
	// header that advertises it) — confirm before reordering.
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}
|
|
|
|
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	// deleteHeaderWithoutNumber keeps the hash->number mapping intact,
	// unlike DeleteHeader used by DeleteBlock.
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}
|
|
|
|
|
|
|
|
// badBlockToKeep is the maximum number of bad blocks retained in the database;
// when exceeded, the lowest-numbered entries are dropped (see WriteBadBlock).
const badBlockToKeep = 10

// badBlock is the database storage representation of an invalid block,
// keeping the header and body separately.
type badBlock struct {
	Header *types.Header
	Body   *types.Body
}
|
|
|
|
|
|
|
|
// badBlockList implements the sort interface to allow sorting a list of
|
|
|
|
// bad blocks by their number in the reverse order.
|
|
|
|
type badBlockList []*badBlock
|
|
|
|
|
|
|
|
func (s badBlockList) Len() int { return len(s) }
|
|
|
|
func (s badBlockList) Less(i, j int) bool {
|
|
|
|
return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
|
|
|
|
}
|
|
|
|
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
|
|
|
|
|
|
// ReadBadBlock retrieves the bad block with the corresponding block hash.
|
|
|
|
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
|
|
|
|
blob, err := db.Get(badBlockKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var badBlocks badBlockList
|
|
|
|
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
for _, bad := range badBlocks {
|
|
|
|
if bad.Header.Hash() == hash {
|
|
|
|
return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadAllBadBlocks retrieves all the bad blocks in the database.
|
|
|
|
// All returned blocks are sorted in reverse order by number.
|
|
|
|
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
|
|
|
|
blob, err := db.Get(badBlockKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var badBlocks badBlockList
|
|
|
|
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
var blocks []*types.Block
|
|
|
|
for _, bad := range badBlocks {
|
|
|
|
blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
|
|
|
|
}
|
|
|
|
return blocks
|
|
|
|
}
|
|
|
|
|
|
|
|
// WriteBadBlock serializes the bad block into the database. If the cumulated
|
|
|
|
// bad blocks exceeds the limitation, the oldest will be dropped.
|
|
|
|
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
|
|
|
|
blob, err := db.Get(badBlockKey)
|
|
|
|
if err != nil {
|
|
|
|
log.Warn("Failed to load old bad blocks", "error", err)
|
|
|
|
}
|
|
|
|
var badBlocks badBlockList
|
|
|
|
if len(blob) > 0 {
|
|
|
|
if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
|
|
|
|
log.Crit("Failed to decode old bad blocks", "error", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, b := range badBlocks {
|
|
|
|
if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
|
|
|
|
log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
badBlocks = append(badBlocks, &badBlock{
|
|
|
|
Header: block.Header(),
|
|
|
|
Body: block.Body(),
|
|
|
|
})
|
|
|
|
sort.Sort(sort.Reverse(badBlocks))
|
|
|
|
if len(badBlocks) > badBlockToKeep {
|
|
|
|
badBlocks = badBlocks[:badBlockToKeep]
|
|
|
|
}
|
|
|
|
data, err := rlp.EncodeToBytes(badBlocks)
|
|
|
|
if err != nil {
|
|
|
|
log.Crit("Failed to encode bad blocks", "err", err)
|
|
|
|
}
|
|
|
|
if err := db.Put(badBlockKey, data); err != nil {
|
|
|
|
log.Crit("Failed to write bad blocks", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBadBlocks deletes all the bad blocks from the database
|
|
|
|
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
|
|
|
|
if err := db.Delete(badBlockKey); err != nil {
|
|
|
|
log.Crit("Failed to delete bad blocks", "err", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// FindCommonAncestor returns the last common ancestor of two block headers
|
all: integrate the freezer with fast sync
* all: freezer style syncing
core, eth, les, light: clean up freezer relative APIs
core, eth, les, trie, ethdb, light: clean a bit
core, eth, les, light: add unit tests
core, light: rewrite setHead function
core, eth: fix downloader unit tests
core: add receipt chain insertion test
core: use constant instead of hardcoding table name
core: fix rollback
core: fix setHead
core/rawdb: remove canonical block first and then iterate side chain
core/rawdb, ethdb: add hasAncient interface
eth/downloader: calculate ancient limit via cht first
core, eth, ethdb: lots of fixes
* eth/downloader: print ancient disable log only for fast sync
6 years ago
|
|
|
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
|
|
|
|
for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
|
|
|
|
a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
|
|
|
|
if a == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for an := a.Number.Uint64(); an < b.Number.Uint64(); {
|
|
|
|
b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
|
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for a.Hash() != b.Hash() {
|
|
|
|
a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
|
|
|
|
if a == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
|
|
|
|
if b == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return a
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeadHeader returns the current canonical head header.
|
|
|
|
func ReadHeadHeader(db ethdb.Reader) *types.Header {
|
|
|
|
headHeaderHash := ReadHeadHeaderHash(db)
|
|
|
|
if headHeaderHash == (common.Hash{}) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
|
|
|
|
if headHeaderNumber == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ReadHeader(db, headHeaderHash, *headHeaderNumber)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ReadHeadBlock returns the current canonical head block.
|
|
|
|
func ReadHeadBlock(db ethdb.Reader) *types.Block {
|
|
|
|
headBlockHash := ReadHeadBlockHash(db)
|
|
|
|
if headBlockHash == (common.Hash{}) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
headBlockNumber := ReadHeaderNumber(db, headBlockHash)
|
|
|
|
if headBlockNumber == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ReadBlock(db, headBlockHash, *headBlockNumber)
|
|
|
|
}
|