go-ethereum/core/blockchain_snapshot_test.go

// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Tests that abnormal program termination (i.e. crash) and restart can recover
// the snapshot properly if the snapshot is enabled.
package core
import (
"bytes"
"fmt"
"math/big"
"os"
"strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/ethash"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/params"
)
// snapshotTestBasic wraps the common testing fields in the snapshot tests.
type snapshotTestBasic struct {
chainBlocks int // Number of blocks to generate for the canonical chain
snapshotBlock uint64 // Block number of the relevant snapshot disk layer
commitBlock uint64 // Block number for which to commit the state to disk
expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis)
expHeadHeader uint64 // Block number of the expected head header
expHeadFastBlock uint64 // Block number of the expected head fast sync block
expHeadBlock uint64 // Block number of the expected head full block
expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer
// shared fields, set at runtime
datadir string
db ethdb.Database
gendb ethdb.Database
engine consensus.Engine
}
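// As a quick orientation (mirroring TestLowCommitCrashWithNewSnapshot further down),
// a scenario with an 8-block chain, a trie commit at C2 and a snapshot disk layer at
// C4 is described roughly like this:
//
//	basic := snapshotTestBasic{
//		chainBlocks:        8,
//		snapshotBlock:      4,
//		commitBlock:        2,
//		expCanonicalBlocks: 8,
//		expHeadHeader:      8,
//		expHeadFastBlock:   8,
//		expHeadBlock:       2,
//		expSnapshotBottom:  4,
//	}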
func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) {
// Create a temporary persistent database
datadir := t.TempDir()
db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "", false)
if err != nil {
t.Fatalf("Failed to create persistent database: %v", err)
}
// Initialize a fresh chain
var (
genesis = (&Genesis{BaseFee: big.NewInt(params.InitialBaseFee)}).MustCommit(db)
engine = ethash.NewFullFaker()
gendb = rawdb.NewMemoryDatabase()
// Snapshot is enabled, so the first snapshot is created from the genesis.
// The snapshot memory allowance is 256MB, which means no snapshot flush
// will happen during block insertion.
cacheConfig = defaultCacheConfig
)
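// For reference, a rough sketch of what the cache configuration above is assumed to
// amount to (the actual defaults live in defaultCacheConfig in blockchain.go): the
// snapshot feature enabled with a 256MB allowance, in contrast to the SnapshotLimit: 0
// configurations used further down to disable it.
//
//	cacheConfig = &CacheConfig{
//		TrieCleanLimit: 256,
//		TrieDirtyLimit: 256,
//		TrieTimeLimit:  5 * time.Minute,
//		SnapshotLimit:  256,
//	}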
chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to create chain: %v", err)
}
blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {})
// Insert the blocks with configured settings.
var breakpoints []uint64
if basic.commitBlock > basic.snapshotBlock {
breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock)
} else {
breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock)
}
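// For example, with commitBlock = 2 and snapshotBlock = 4 (the setup used by
// TestLowCommitCrashWithNewSnapshot below), the loop imports blocks[0:2] and then
// blocks[2:4], committing the trie at C2 and flattening the snapshot tree at C4
// along the way, before the remaining tail C5..C8 is inserted afterwards.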
var startPoint uint64
for _, point := range breakpoints {
if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil {
t.Fatalf("Failed to import canonical chain start: %v", err)
}
startPoint = point
if basic.commitBlock > 0 && basic.commitBlock == point {
chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil)
}
if basic.snapshotBlock > 0 && basic.snapshotBlock == point {
// Flush the entire snapshot tree into the disk; the relevant
// (a) snapshot root and (b) snapshot generator will be
// persisted atomically.
chain.snaps.Cap(blocks[point-1].Root(), 0)
diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root()
if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) {
t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot)
}
}
}
if _, err := chain.InsertChain(blocks[startPoint:]); err != nil {
t.Fatalf("Failed to import canonical chain tail: %v", err)
}
// Set runtime fields
basic.datadir = datadir
basic.db = db
basic.gendb = gendb
basic.engine = engine
return chain, blocks
}
func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) {
// Iterate over all the remaining blocks and ensure there are no gaps
verifyNoGaps(t, chain, true, blocks)
verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks)
if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader {
t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader)
}
if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock {
t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock)
}
if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock {
t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock)
}
// Check the snapshot disk layer, ensure it matches the expected block
block := chain.GetBlockByNumber(basic.expSnapshotBottom)
if block == nil {
t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom)
} else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) {
t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot())
}
// Check the snapshot, ensure it's integrated
if err := chain.snaps.Verify(block.Root()); err != nil {
t.Errorf("The disk layer is not integrated %v", err)
}
}
//nolint:unused
func (basic *snapshotTestBasic) dump() string {
buffer := new(strings.Builder)
fmt.Fprint(buffer, "Chain:\n G")
for i := 0; i < basic.chainBlocks; i++ {
fmt.Fprintf(buffer, "->C%d", i+1)
}
fmt.Fprint(buffer, " (HEAD)\n\n")
fmt.Fprintf(buffer, "Commit: G")
if basic.commitBlock > 0 {
fmt.Fprintf(buffer, ", C%d", basic.commitBlock)
}
fmt.Fprint(buffer, "\n")
fmt.Fprintf(buffer, "Snapshot: G")
if basic.snapshotBlock > 0 {
fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock)
}
fmt.Fprint(buffer, "\n")
//if crash {
// fmt.Fprintf(buffer, "\nCRASH\n\n")
//} else {
// fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead)
//}
fmt.Fprintf(buffer, "------------------------------\n\n")
fmt.Fprint(buffer, "Expected in leveldb:\n G")
for i := 0; i < basic.expCanonicalBlocks; i++ {
fmt.Fprintf(buffer, "->C%d", i+1)
}
fmt.Fprintf(buffer, "\n\n")
fmt.Fprintf(buffer, "Expected head header : C%d\n", basic.expHeadHeader)
fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock)
if basic.expHeadBlock == 0 {
fmt.Fprintf(buffer, "Expected head block : G\n")
} else {
fmt.Fprintf(buffer, "Expected head block : C%d\n", basic.expHeadBlock)
}
if basic.expSnapshotBottom == 0 {
fmt.Fprintf(buffer, "Expected snapshot disk : G\n")
} else {
fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", basic.expSnapshotBottom)
}
return buffer.String()
}
func (basic *snapshotTestBasic) teardown() {
basic.db.Close()
basic.gendb.Close()
os.RemoveAll(basic.datadir)
}
// snapshotTest is a test case type for normal snapshot recovery.
// It can be used to test a normal Geth restart.
type snapshotTest struct {
snapshotTestBasic
}
func (snaptest *snapshotTest) test(t *testing.T) {
// It's hard to follow the test case; uncomment the lines below to visualize the input:
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
chain, blocks := snaptest.prepare(t)
// Restart the chain normally
chain.Stop()
newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
}
// crashSnapshotTest is a test case type for abnormal snapshot recovery.
// It can be used to test a Geth restart after a crash.
type crashSnapshotTest struct {
snapshotTestBasic
}
func (snaptest *crashSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case; uncomment the lines below to visualize the input:
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
chain, blocks := snaptest.prepare(t)
// Pull the plug on the database, simulating a hard crash
db := chain.db
db.Close()
// Start a new blockchain back up and see where the repair leads us
newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "", false)
if err != nil {
t.Fatalf("Failed to reopen persistent database: %v", err)
}
defer newdb.Close()
// The interesting thing is: instead of starting the blockchain just once after
// the crash, we restart it twice here: once after the crash and once after a
// normal stop. This ensures the broken snapshot is detected in both cases.
newchain, err := NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.Stop()
newchain, err = NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
}
// gappedSnapshotTest is a test type used to test this scenario:
// - have a complete snapshot
// - restart without enabling the snapshot
// - insert a few blocks
// - restart with the snapshot enabled again
type gappedSnapshotTest struct {
snapshotTestBasic
gapped int // Number of blocks to insert without enabling snapshot
}
func (snaptest *gappedSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case; uncomment the lines below to visualize the input:
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
chain, blocks := snaptest.prepare(t)
// Insert blocks without enabling snapshot if gapping is required.
chain.Stop()
gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {})
// Insert a few more blocks without enabling snapshot
var cacheConfig = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
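// Note: with SnapshotLimit set to 0 the snapshot feature is disabled entirely, so
// the gapped blocks inserted below do not touch the existing snapshot. Re-enabling
// the snapshot on the final restart then forces a rebuild from the new chain head.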
newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newchain.InsertChain(gappedBlocks)
newchain.Stop()
// Restart the chain with the snapshot enabled
newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
}
// setHeadSnapshotTest is the test type used to test this scenario:
// - have a complete snapshot
// - set the head to a lower point
// - restart
type setHeadSnapshotTest struct {
snapshotTestBasic
setHead uint64 // Block number to set head back to
}
func (snaptest *setHeadSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case; uncomment the lines below to visualize the input:
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
chain, blocks := snaptest.prepare(t)
// Rewind the chain if setHead operation is required.
chain.SetHead(snaptest.setHead)
chain.Stop()
newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
}
// wipeCrashSnapshotTest is the test type used to test this scenario:
// - have a complete snapshot
// - restart, insert more blocks without enabling the snapshot
// - restart again with the snapshot enabled
// - crash
type wipeCrashSnapshotTest struct {
snapshotTestBasic
newBlocks int
}
func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) {
// It's hard to follow the test case; uncomment the lines below to visualize the input:
// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// fmt.Println(tt.dump())
chain, blocks := snaptest.prepare(t)
// First, stop the chain properly, with all snapshot journal
// and state committed.
chain.Stop()
config := &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 0,
}
newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {})
newchain.InsertChain(newBlocks)
newchain.Stop()
// Restart the chain; the snapshot wiper should start working now
config = &CacheConfig{
TrieCleanLimit: 256,
TrieDirtyLimit: 256,
TrieTimeLimit: 5 * time.Minute,
SnapshotLimit: 256,
SnapshotWait: false, // Don't wait for the rebuild to finish
}
_, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
// Simulate a blockchain crash: the chain above is never stopped cleanly, and the
// interrupted snapshot rebuild must be picked up again on the next startup.
newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil)
if err != nil {
t.Fatalf("Failed to recreate chain: %v", err)
}
defer newchain.Stop()
snaptest.verify(t, newchain, blocks)
}
// Tests a Geth restart with a valid snapshot. Before the shutdown, the whole
// snapshot journal is persisted correctly, so no snapshot recovery is required
// after the restart.
func TestRestartWithNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G
//
// SetHead(0)
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8
//
// Expected head header : C8
// Expected head fast block: C8
// Expected head block : C8
// Expected snapshot disk : G
test := &snapshotTest{
snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 0,
commitBlock: 0,
expCanonicalBlocks: 8,
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 8,
expSnapshotBottom: 0, // Initial disk layer built from genesis
},
}
test.test(t)
test.teardown()
}
// Tests a crashed Geth being restarted with a broken snapshot. In this case the
// chain head should be rewound to a point with available state, and the new head
// should also be lower than the disk layer. Since there is no committed point,
// the chain is rewound to the genesis and the disk layer is left for recovery.
func TestNoCommitCrashWithNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G, C4
//
// CRASH
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8
//
// Expected head header : C8
// Expected head fast block: C8
// Expected head block : G
// Expected snapshot disk : C4
test := &crashSnapshotTest{
snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 4,
commitBlock: 0,
expCanonicalBlocks: 8,
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 0,
expSnapshotBottom: 4, // Last committed disk layer, awaiting recovery
},
}
test.test(t)
test.teardown()
}
// Tests a crashed Geth being restarted with a broken snapshot. In this case the
// chain head should be rewound to a point with available state, and the new head
// should also be lower than the disk layer. Since there is only a committed point
// below the disk layer, the chain is rewound to that committed point and the disk
// layer is left for recovery.
func TestLowCommitCrashWithNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G, C2
// Snapshot: G, C4
//
// CRASH
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8
//
// Expected head header : C8
// Expected head fast block: C8
// Expected head block : C2
// Expected snapshot disk : C4
test := &crashSnapshotTest{
snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 4,
commitBlock: 2,
expCanonicalBlocks: 8,
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 2,
expSnapshotBottom: 4, // Last committed disk layer, awaiting recovery
},
}
test.test(t)
test.teardown()
}
// Tests a crashed Geth being restarted with a broken snapshot. In this case the
// chain head should be rewound to a point with available state, and the new head
// should also be lower than the disk layer. Since the only committed point is
// above the disk layer, the chain is rewound to the genesis and the disk layer
// is left for recovery.
func TestHighCommitCrashWithNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G, C6
// Snapshot: G, C4
//
// CRASH
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8
//
// Expected head header : C8
// Expected head fast block: C8
// Expected head block : G
// Expected snapshot disk : C4
test := &crashSnapshotTest{
snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 4,
commitBlock: 6,
expCanonicalBlocks: 8,
expHeadHeader: 8,
expHeadFastBlock: 8,
expHeadBlock: 0,
expSnapshotBottom: 4, // Last committed disk layer, awaiting recovery
},
}
test.test(t)
test.teardown()
}
// Tests a Geth that was running with the snapshot enabled, then restarted without
// the snapshot, and after that restarted with the snapshot enabled again. In this
// case the snapshot should be rebuilt from the latest chain head.
func TestGappedNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G
//
// SetHead(0)
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
//
// Expected head header : C10
// Expected head fast block: C10
// Expected head block : C10
// Expected snapshot disk : C10
test := &gappedSnapshotTest{
snapshotTestBasic: snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 0,
commitBlock: 0,
expCanonicalBlocks: 10,
expHeadHeader: 10,
expHeadFastBlock: 10,
expHeadBlock: 10,
expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD
},
gapped: 2,
}
test.test(t)
test.teardown()
}
// Tests a Geth that was running with the snapshot enabled and then had SetHead
// applied. In this case the head is rewound to the target (which has state
// available). After that the chain is restarted and the original disk layer
// is kept.
func TestSetHeadWithNewSnapshot(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G
//
// SetHead(4)
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4
//
// Expected head header : C4
// Expected head fast block: C4
// Expected head block : C4
// Expected snapshot disk : G
test := &setHeadSnapshotTest{
snapshotTestBasic: snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 0,
commitBlock: 0,
expCanonicalBlocks: 4,
expHeadHeader: 4,
expHeadFastBlock: 4,
expHeadBlock: 4,
expSnapshotBottom: 0, // The initial disk layer is built from the genesis
},
setHead: 4,
}
test.test(t)
test.teardown()
}
// Tests a Geth that was running with a complete snapshot and then imported a few
// more blocks on top without the snapshot enabled. After restarting with the
// snapshot re-enabled, a crash happens. Check that everything is ok after the
// final restart.
func TestRecoverSnapshotFromWipingCrash(t *testing.T) {
// Chain:
// G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD)
//
// Commit: G
// Snapshot: G
//
// SetHead(0)
//
// ------------------------------
//
// Expected in leveldb:
// G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10
//
// Expected head header : C10
// Expected head fast block: C10
// Expected head block : C10
// Expected snapshot disk : C10
test := &wipeCrashSnapshotTest{
snapshotTestBasic: snapshotTestBasic{
chainBlocks: 8,
snapshotBlock: 4,
commitBlock: 0,
expCanonicalBlocks: 10,
expHeadHeader: 10,
expHeadFastBlock: 10,
expHeadBlock: 10,
expSnapshotBottom: 10,
},
newBlocks: 2,
}
test.test(t)
test.teardown()
}