mirror of https://github.com/ethereum/go-ethereum
parent f300c0df01
commit 351a5903b0
@@ -1,137 +0,0 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "bufio"
    "fmt"
    "io"
    "os"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
)

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
    Hash common.Hash
    Blob []byte
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
    Hash common.Hash
    Keys []common.Hash
    Vals [][]byte
}

// loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
// diff and verifying that it can be linked to the requested parent.
func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
    // Read the next diff journal entry
    var root common.Hash
    if err := r.Decode(&root); err != nil {
        // The first read may fail with EOF, marking the end of the journal
        if err == io.EOF {
            return parent, nil
        }
        return nil, fmt.Errorf("load diff root: %v", err)
    }
    var accounts []journalAccount
    if err := r.Decode(&accounts); err != nil {
        return nil, fmt.Errorf("load diff accounts: %v", err)
    }
    accountData := make(map[common.Hash][]byte)
    for _, entry := range accounts {
        accountData[entry.Hash] = entry.Blob
    }
    var storage []journalStorage
    if err := r.Decode(&storage); err != nil {
        return nil, fmt.Errorf("load diff storage: %v", err)
    }
    storageData := make(map[common.Hash]map[common.Hash][]byte)
    for _, entry := range storage {
        slots := make(map[common.Hash][]byte)
        for i, key := range entry.Keys {
            slots[key] = entry.Vals[i]
        }
        storageData[entry.Hash] = slots
    }
    return loadDiffLayer(newDiffLayer(parent, root, accountData, storageData), r)
}

// journal is the internal version of Journal that also returns the journal file
// so subsequent layers know where to write to.
func (dl *diffLayer) journal() (io.WriteCloser, error) {
    // If we've reached the bottom, open the journal
    var writer io.WriteCloser
    if parent, ok := dl.parent.(*diskLayer); ok {
        file, err := os.Create(parent.journal)
        if err != nil {
            return nil, err
        }
        writer = file
    }
    // If we haven't reached the bottom yet, journal the parent first
    if writer == nil {
        file, err := dl.parent.(*diffLayer).journal()
        if err != nil {
            return nil, err
        }
        writer = file
    }
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        writer.Close()
        return nil, ErrSnapshotStale
    }
    // Everything below was journalled, persist this layer too
    buf := bufio.NewWriter(writer)
    if err := rlp.Encode(buf, dl.root); err != nil {
        buf.Flush()
        writer.Close()
        return nil, err
    }
    accounts := make([]journalAccount, 0, len(dl.accountData))
    for hash, blob := range dl.accountData {
        accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
    }
    if err := rlp.Encode(buf, accounts); err != nil {
        buf.Flush()
        writer.Close()
        return nil, err
    }
    storage := make([]journalStorage, 0, len(dl.storageData))
    for hash, slots := range dl.storageData {
        keys := make([]common.Hash, 0, len(slots))
        vals := make([][]byte, 0, len(slots))
        for key, val := range slots {
            keys = append(keys, key)
            vals = append(vals, val)
        }
        storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
    }
    if err := rlp.Encode(buf, storage); err != nil {
        buf.Flush()
        writer.Close()
        return nil, err
    }
    buf.Flush()
    return writer, nil
}
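For reference, the removed journal() above serialises each diff layer as three consecutive RLP values: the layer root, the account list and the storage list. Below is a minimal standalone sketch of that record layout; it is not part of the commit, and the local struct copies and example values are purely illustrative.

package main

import (
    "bytes"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
)

// Local copies of the journal entry shapes above, for illustration only.
type journalAccount struct {
    Hash common.Hash
    Blob []byte
}

type journalStorage struct {
    Hash common.Hash
    Keys []common.Hash
    Vals [][]byte
}

func main() {
    buf := new(bytes.Buffer)

    // One diff layer is journalled as three consecutive RLP values:
    // the layer root, the account list and the storage list.
    root := common.HexToHash("0x01")
    accounts := []journalAccount{{Hash: common.HexToHash("0x02"), Blob: []byte{0xde, 0xad}}}
    storage := []journalStorage{{
        Hash: common.HexToHash("0x02"),
        Keys: []common.Hash{common.HexToHash("0x03")},
        Vals: [][]byte{{0xbe, 0xef}},
    }}
    for _, section := range []interface{}{root, accounts, storage} {
        if err := rlp.Encode(buf, section); err != nil {
            panic(err)
        }
    }
    fmt.Printf("journalled diff layer: %d bytes\n", buf.Len())
}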
@@ -0,0 +1,433 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "bytes"
    "testing"

    "github.com/VictoriaMetrics/fastcache"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb/memorydb"
)

// reverse reverses the contents of a byte slice. It's used to update random accounts
// with deterministic changes.
func reverse(blob []byte) []byte {
    res := make([]byte, len(blob))
    for i, b := range blob {
        res[len(blob)-1-i] = b
    }
    return res
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values.
func TestDiskMerge(t *testing.T) {
    // Create some accounts in the disk layer
    db := memorydb.New()

    var (
        accNoModNoCache     = common.Hash{0x1}
        accNoModCache       = common.Hash{0x2}
        accModNoCache       = common.Hash{0x3}
        accModCache         = common.Hash{0x4}
        accDelNoCache       = common.Hash{0x5}
        accDelCache         = common.Hash{0x6}
        conNoModNoCache     = common.Hash{0x7}
        conNoModNoCacheSlot = common.Hash{0x70}
        conNoModCache       = common.Hash{0x8}
        conNoModCacheSlot   = common.Hash{0x80}
        conModNoCache       = common.Hash{0x9}
        conModNoCacheSlot   = common.Hash{0x90}
        conModCache         = common.Hash{0xa}
        conModCacheSlot     = common.Hash{0xa0}
        conDelNoCache       = common.Hash{0xb}
        conDelNoCacheSlot   = common.Hash{0xb0}
        conDelCache         = common.Hash{0xc}
        conDelCacheSlot     = common.Hash{0xc0}
        conNukeNoCache      = common.Hash{0xd}
        conNukeNoCacheSlot  = common.Hash{0xd0}
        conNukeCache        = common.Hash{0xe}
        conNukeCacheSlot    = common.Hash{0xe0}
        baseRoot            = randomHash()
        diffRoot            = randomHash()
    )

    rawdb.WriteAccountSnapshot(db, accNoModNoCache, accNoModNoCache[:])
    rawdb.WriteAccountSnapshot(db, accNoModCache, accNoModCache[:])
    rawdb.WriteAccountSnapshot(db, accModNoCache, accModNoCache[:])
    rawdb.WriteAccountSnapshot(db, accModCache, accModCache[:])
    rawdb.WriteAccountSnapshot(db, accDelNoCache, accDelNoCache[:])
    rawdb.WriteAccountSnapshot(db, accDelCache, accDelCache[:])

    rawdb.WriteAccountSnapshot(db, conNoModNoCache, conNoModNoCache[:])
    rawdb.WriteStorageSnapshot(db, conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conNoModCache, conNoModCache[:])
    rawdb.WriteStorageSnapshot(db, conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conModNoCache, conModNoCache[:])
    rawdb.WriteStorageSnapshot(db, conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conModCache, conModCache[:])
    rawdb.WriteStorageSnapshot(db, conModCache, conModCacheSlot, conModCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conDelNoCache, conDelNoCache[:])
    rawdb.WriteStorageSnapshot(db, conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conDelCache, conDelCache[:])
    rawdb.WriteStorageSnapshot(db, conDelCache, conDelCacheSlot, conDelCacheSlot[:])

    rawdb.WriteAccountSnapshot(db, conNukeNoCache, conNukeNoCache[:])
    rawdb.WriteStorageSnapshot(db, conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
    rawdb.WriteAccountSnapshot(db, conNukeCache, conNukeCache[:])
    rawdb.WriteStorageSnapshot(db, conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

    rawdb.WriteSnapshotRoot(db, baseRoot)

    // Create a disk layer based on the above and cache in some data
    snaps := &Tree{
        layers: map[common.Hash]snapshot{
            baseRoot: &diskLayer{
                diskdb: db,
                cache:  fastcache.New(500 * 1024),
                root:   baseRoot,
            },
        },
    }
    base := snaps.Snapshot(baseRoot)
    base.AccountRLP(accNoModCache)
    base.AccountRLP(accModCache)
    base.AccountRLP(accDelCache)
    base.Storage(conNoModCache, conNoModCacheSlot)
    base.Storage(conModCache, conModCacheSlot)
    base.Storage(conDelCache, conDelCacheSlot)
    base.Storage(conNukeCache, conNukeCacheSlot)

    // Modify or delete some accounts, flatten everything onto disk
    if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
        accModNoCache:  reverse(accModNoCache[:]),
        accModCache:    reverse(accModCache[:]),
        accDelNoCache:  nil,
        accDelCache:    nil,
        conNukeNoCache: nil,
        conNukeCache:   nil,
    }, map[common.Hash]map[common.Hash][]byte{
        conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
        conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
        conDelNoCache: {conDelNoCacheSlot: nil},
        conDelCache:   {conDelCacheSlot: nil},
    }); err != nil {
        t.Fatalf("failed to update snapshot tree: %v", err)
    }
    if err := snaps.Cap(diffRoot, 0); err != nil {
        t.Fatalf("failed to flatten snapshot tree: %v", err)
    }
    // Retrieve all the data through the disk layer and validate it
    base = snaps.Snapshot(diffRoot)
    if _, ok := base.(*diskLayer); !ok {
        t.Fatalf("update not flattened into the disk layer")
    }

    // assertAccount ensures that an account matches the given blob.
    assertAccount := func(account common.Hash, data []byte) {
        t.Helper()
        blob, err := base.AccountRLP(account)
        if err != nil {
            t.Errorf("account access (%x) failed: %v", account, err)
        } else if !bytes.Equal(blob, data) {
            t.Errorf("account access (%x) mismatch: have %x, want %x", account, blob, data)
        }
    }
    assertAccount(accNoModNoCache, accNoModNoCache[:])
    assertAccount(accNoModCache, accNoModCache[:])
    assertAccount(accModNoCache, reverse(accModNoCache[:]))
    assertAccount(accModCache, reverse(accModCache[:]))
    assertAccount(accDelNoCache, nil)
    assertAccount(accDelCache, nil)

    // assertStorage ensures that a storage slot matches the given blob.
    assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
        t.Helper()
        blob, err := base.Storage(account, slot)
        if err != nil {
            t.Errorf("storage access (%x:%x) failed: %v", account, slot, err)
        } else if !bytes.Equal(blob, data) {
            t.Errorf("storage access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
        }
    }
    assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
    assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
    assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
    assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
    assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
    assertStorage(conDelCache, conDelCacheSlot, nil)
    assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
    assertStorage(conNukeCache, conNukeCacheSlot, nil)

    // Retrieve all the data directly from the database and validate it

    // assertDatabaseAccount ensures that an account from the database matches the given blob.
    assertDatabaseAccount := func(account common.Hash, data []byte) {
        t.Helper()
        if blob := rawdb.ReadAccountSnapshot(db, account); !bytes.Equal(blob, data) {
            t.Errorf("account database access (%x) mismatch: have %x, want %x", account, blob, data)
        }
    }
    assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
    assertDatabaseAccount(accNoModCache, accNoModCache[:])
    assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
    assertDatabaseAccount(accModCache, reverse(accModCache[:]))
    assertDatabaseAccount(accDelNoCache, nil)
    assertDatabaseAccount(accDelCache, nil)

    // assertDatabaseStorage ensures that a storage slot from the database matches the given blob.
    assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
        t.Helper()
        if blob := rawdb.ReadStorageSnapshot(db, account, slot); !bytes.Equal(blob, data) {
            t.Errorf("storage database access (%x:%x) mismatch: have %x, want %x", account, slot, blob, data)
        }
    }
    assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
    assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
    assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
    assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
    assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
    assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
    assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
    assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
func TestDiskPartialMerge(t *testing.T) {
    // Iterate the test a few times to ensure we pick various internal orderings
    // for the data slots as well as the progress marker.
    for i := 0; i < 1024; i++ {
        // Create some accounts in the disk layer
        db := memorydb.New()

        var (
            accNoModNoCache     = randomHash()
            accNoModCache       = randomHash()
            accModNoCache       = randomHash()
            accModCache         = randomHash()
            accDelNoCache       = randomHash()
            accDelCache         = randomHash()
            conNoModNoCache     = randomHash()
            conNoModNoCacheSlot = randomHash()
            conNoModCache       = randomHash()
            conNoModCacheSlot   = randomHash()
            conModNoCache       = randomHash()
            conModNoCacheSlot   = randomHash()
            conModCache         = randomHash()
            conModCacheSlot     = randomHash()
            conDelNoCache       = randomHash()
            conDelNoCacheSlot   = randomHash()
            conDelCache         = randomHash()
            conDelCacheSlot     = randomHash()
            conNukeNoCache      = randomHash()
            conNukeNoCacheSlot  = randomHash()
            conNukeCache        = randomHash()
            conNukeCacheSlot    = randomHash()
            baseRoot            = randomHash()
            diffRoot            = randomHash()
            genMarker           = append(randomHash().Bytes(), randomHash().Bytes()...)
        )

        // insertAccount injects an account into the database if it is covered by
        // the generation marker (i.e. not past it), and drops the op otherwise.
        // This is needed to seed the database with a valid starting snapshot.
        insertAccount := func(account common.Hash, data []byte) {
            if bytes.Compare(account[:], genMarker) <= 0 {
                rawdb.WriteAccountSnapshot(db, account, data[:])
            }
        }
        insertAccount(accNoModNoCache, accNoModNoCache[:])
        insertAccount(accNoModCache, accNoModCache[:])
        insertAccount(accModNoCache, accModNoCache[:])
        insertAccount(accModCache, accModCache[:])
        insertAccount(accDelNoCache, accDelNoCache[:])
        insertAccount(accDelCache, accDelCache[:])

        // insertStorage injects a storage slot into the database if it is covered
        // by the generation marker (i.e. not past it), and drops the op otherwise.
        // This is needed to seed the database with a valid starting snapshot.
        insertStorage := func(account common.Hash, slot common.Hash, data []byte) {
            if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 {
                rawdb.WriteStorageSnapshot(db, account, slot, data[:])
            }
        }
        insertAccount(conNoModNoCache, conNoModNoCache[:])
        insertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
        insertAccount(conNoModCache, conNoModCache[:])
        insertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
        insertAccount(conModNoCache, conModNoCache[:])
        insertStorage(conModNoCache, conModNoCacheSlot, conModNoCacheSlot[:])
        insertAccount(conModCache, conModCache[:])
        insertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
        insertAccount(conDelNoCache, conDelNoCache[:])
        insertStorage(conDelNoCache, conDelNoCacheSlot, conDelNoCacheSlot[:])
        insertAccount(conDelCache, conDelCache[:])
        insertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])

        insertAccount(conNukeNoCache, conNukeNoCache[:])
        insertStorage(conNukeNoCache, conNukeNoCacheSlot, conNukeNoCacheSlot[:])
        insertAccount(conNukeCache, conNukeCache[:])
        insertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

        rawdb.WriteSnapshotRoot(db, baseRoot)

        // Create a disk layer based on the above using a random progress marker
        // and cache in some data.
        snaps := &Tree{
            layers: map[common.Hash]snapshot{
                baseRoot: &diskLayer{
                    diskdb: db,
                    cache:  fastcache.New(500 * 1024),
                    root:   baseRoot,
                },
            },
        }
        snaps.layers[baseRoot].(*diskLayer).genMarker = genMarker
        base := snaps.Snapshot(baseRoot)

        // assertAccount ensures that an account matches the given blob if it's
        // already covered by the disk snapshot, and errors out otherwise.
        assertAccount := func(account common.Hash, data []byte) {
            t.Helper()
            blob, err := base.AccountRLP(account)
            if bytes.Compare(account[:], genMarker) > 0 && err != ErrNotCoveredYet {
                t.Fatalf("test %d: post-marker (%x) account access (%x) succeeded: %x", i, genMarker, account, blob)
            }
            if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
                t.Fatalf("test %d: pre-marker (%x) account access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
            }
        }
        assertAccount(accNoModCache, accNoModCache[:])
        assertAccount(accModCache, accModCache[:])
        assertAccount(accDelCache, accDelCache[:])

        // assertStorage ensures that a storage slot matches the given blob if
        // it's already covered by the disk snapshot, and errors out otherwise.
        assertStorage := func(account common.Hash, slot common.Hash, data []byte) {
            t.Helper()
            blob, err := base.Storage(account, slot)
            if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && err != ErrNotCoveredYet {
                t.Fatalf("test %d: post-marker (%x) storage access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
            }
            if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
                t.Fatalf("test %d: pre-marker (%x) storage access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
            }
        }
        assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
        assertStorage(conModCache, conModCacheSlot, conModCacheSlot[:])
        assertStorage(conDelCache, conDelCacheSlot, conDelCacheSlot[:])
        assertStorage(conNukeCache, conNukeCacheSlot, conNukeCacheSlot[:])

        // Modify or delete some accounts, flatten everything onto disk
        if err := snaps.Update(diffRoot, baseRoot, map[common.Hash][]byte{
            accModNoCache:  reverse(accModNoCache[:]),
            accModCache:    reverse(accModCache[:]),
            accDelNoCache:  nil,
            accDelCache:    nil,
            conNukeNoCache: nil,
            conNukeCache:   nil,
        }, map[common.Hash]map[common.Hash][]byte{
            conModNoCache: {conModNoCacheSlot: reverse(conModNoCacheSlot[:])},
            conModCache:   {conModCacheSlot: reverse(conModCacheSlot[:])},
            conDelNoCache: {conDelNoCacheSlot: nil},
            conDelCache:   {conDelCacheSlot: nil},
        }); err != nil {
            t.Fatalf("test %d: failed to update snapshot tree: %v", i, err)
        }
        if err := snaps.Cap(diffRoot, 0); err != nil {
            t.Fatalf("test %d: failed to flatten snapshot tree: %v", i, err)
        }
        // Retrieve all the data through the disk layer and validate it
        base = snaps.Snapshot(diffRoot)
        if _, ok := base.(*diskLayer); !ok {
            t.Fatalf("test %d: update not flattened into the disk layer", i)
        }
        assertAccount(accNoModNoCache, accNoModNoCache[:])
        assertAccount(accNoModCache, accNoModCache[:])
        assertAccount(accModNoCache, reverse(accModNoCache[:]))
        assertAccount(accModCache, reverse(accModCache[:]))
        assertAccount(accDelNoCache, nil)
        assertAccount(accDelCache, nil)

        assertStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
        assertStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
        assertStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
        assertStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
        assertStorage(conDelNoCache, conDelNoCacheSlot, nil)
        assertStorage(conDelCache, conDelCacheSlot, nil)
        assertStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
        assertStorage(conNukeCache, conNukeCacheSlot, nil)

        // Retrieve all the data directly from the database and validate it

        // assertDatabaseAccount ensures that an account inside the database matches
        // the given blob if it's already covered by the disk snapshot, and does not
        // exist otherwise.
        assertDatabaseAccount := func(account common.Hash, data []byte) {
            t.Helper()
            blob := rawdb.ReadAccountSnapshot(db, account)
            if bytes.Compare(account[:], genMarker) > 0 && blob != nil {
                t.Fatalf("test %d: post-marker (%x) account database access (%x) succeeded: %x", i, genMarker, account, blob)
            }
            if bytes.Compare(account[:], genMarker) <= 0 && !bytes.Equal(blob, data) {
                t.Fatalf("test %d: pre-marker (%x) account database access (%x) mismatch: have %x, want %x", i, genMarker, account, blob, data)
            }
        }
        assertDatabaseAccount(accNoModNoCache, accNoModNoCache[:])
        assertDatabaseAccount(accNoModCache, accNoModCache[:])
        assertDatabaseAccount(accModNoCache, reverse(accModNoCache[:]))
        assertDatabaseAccount(accModCache, reverse(accModCache[:]))
        assertDatabaseAccount(accDelNoCache, nil)
        assertDatabaseAccount(accDelCache, nil)

        // assertDatabaseStorage ensures that a storage slot inside the database
        // matches the given blob if it's already covered by the disk snapshot,
        // and does not exist otherwise.
        assertDatabaseStorage := func(account common.Hash, slot common.Hash, data []byte) {
            t.Helper()
            blob := rawdb.ReadStorageSnapshot(db, account, slot)
            if bytes.Compare(append(account[:], slot[:]...), genMarker) > 0 && blob != nil {
                t.Fatalf("test %d: post-marker (%x) storage database access (%x:%x) succeeded: %x", i, genMarker, account, slot, blob)
            }
            if bytes.Compare(append(account[:], slot[:]...), genMarker) <= 0 && !bytes.Equal(blob, data) {
                t.Fatalf("test %d: pre-marker (%x) storage database access (%x:%x) mismatch: have %x, want %x", i, genMarker, account, slot, blob, data)
            }
        }
        assertDatabaseStorage(conNoModNoCache, conNoModNoCacheSlot, conNoModNoCacheSlot[:])
        assertDatabaseStorage(conNoModCache, conNoModCacheSlot, conNoModCacheSlot[:])
        assertDatabaseStorage(conModNoCache, conModNoCacheSlot, reverse(conModNoCacheSlot[:]))
        assertDatabaseStorage(conModCache, conModCacheSlot, reverse(conModCacheSlot[:]))
        assertDatabaseStorage(conDelNoCache, conDelNoCacheSlot, nil)
        assertDatabaseStorage(conDelCache, conDelCacheSlot, nil)
        assertDatabaseStorage(conNukeNoCache, conNukeNoCacheSlot, nil)
        assertDatabaseStorage(conNukeCache, conNukeCacheSlot, nil)
    }
}

// Tests that merging something into a disk layer persists it into the database
// and invalidates any previously written and cached values, discarding anything
// after the in-progress generation marker.
//
// This test case is a tiny specialized case of TestDiskPartialMerge, which tests
// some very specific cornercases that random tests won't ever trigger.
func TestDiskMidAccountPartialMerge(t *testing.T) {
}
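The tests above rely on a randomHash helper that is defined elsewhere in the snapshot package and is not part of this hunk. A plausible minimal sketch of such a helper is shown below, purely so the tests read self-contained; the actual implementation may differ.

package main

import (
    "crypto/rand"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// randomHash fills a hash with cryptographically random bytes and returns it.
func randomHash() common.Hash {
    var hash common.Hash
    if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil {
        panic(err)
    }
    return hash
}

func main() {
    fmt.Println(randomHash().Hex())
}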
@@ -0,0 +1,257 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "bufio"
    "encoding/binary"
    "errors"
    "fmt"
    "io"
    "os"
    "time"

    "github.com/VictoriaMetrics/fastcache"
    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/log"
    "github.com/ethereum/go-ethereum/rlp"
    "github.com/ethereum/go-ethereum/trie"
)

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
    Wiping   bool // Whether the database was in progress of being wiped
    Done     bool // Whether the generator finished creating the snapshot
    Marker   []byte
    Accounts uint64
    Slots    uint64
    Storage  uint64
}

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
    Hash common.Hash
    Blob []byte
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
    Hash common.Hash
    Keys []common.Hash
    Vals [][]byte
}

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, journal string, cache int, root common.Hash) (snapshot, error) {
    // Retrieve the disk layer root of the snapshot, failing if no snapshot is
    // present in the database (or it crashed mid-update).
    baseRoot := rawdb.ReadSnapshotRoot(diskdb)
    if baseRoot == (common.Hash{}) {
        return nil, errors.New("missing or corrupted snapshot")
    }
    base := &diskLayer{
        diskdb: diskdb,
        triedb: triedb,
        cache:  fastcache.New(cache * 1024 * 1024),
        root:   baseRoot,
    }
    // Open the journal, it must exist since even for a zero-diff-layer snapshot
    // it stores whether generation has already completed or is still in progress
    file, err := os.Open(journal)
    if err != nil {
        return nil, err
    }
    r := rlp.NewStream(file, 0)

    // Read the snapshot generation progress for the disk layer
    var generator journalGenerator
    if err := r.Decode(&generator); err != nil {
        return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
    }
    // Load all the snapshot diffs from the journal
    snapshot, err := loadDiffLayer(base, r)
    if err != nil {
        return nil, err
    }
    // Entire snapshot journal loaded, sanity check the head and return
    if head := snapshot.Root(); head != root {
        return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
    }
    // Everything loaded correctly, resume any suspended operations
    if !generator.Done {
        // If the generator was still wiping, restart one from scratch (fine for
        // now as it's rare and the wiper deletes the stuff it touches anyway, so
        // restarting won't incur a lot of extra database hops).
        var wiper chan struct{}
        if generator.Wiping {
            log.Info("Resuming previous snapshot wipe")
            wiper = wipeSnapshot(diskdb, false)
        }
        // Whether or not wiping was in progress, load any generator progress too
        base.genMarker = generator.Marker
        if base.genMarker == nil {
            base.genMarker = []byte{}
        }
        base.genAbort = make(chan chan *generatorStats)

        var origin uint64
        if len(generator.Marker) >= 8 {
            origin = binary.BigEndian.Uint64(generator.Marker)
        }
        go base.generate(&generatorStats{
            wiping:   wiper,
            origin:   origin,
            start:    time.Now(),
            accounts: generator.Accounts,
            slots:    generator.Slots,
            storage:  common.StorageSize(generator.Storage),
        })
    }
    return snapshot, nil
}

// loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
// diff and verifying that it can be linked to the requested parent.
func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
    // Read the next diff journal entry
    var root common.Hash
    if err := r.Decode(&root); err != nil {
        // The first read may fail with EOF, marking the end of the journal
        if err == io.EOF {
            return parent, nil
        }
        return nil, fmt.Errorf("load diff root: %v", err)
    }
    var accounts []journalAccount
    if err := r.Decode(&accounts); err != nil {
        return nil, fmt.Errorf("load diff accounts: %v", err)
    }
    accountData := make(map[common.Hash][]byte)
    for _, entry := range accounts {
        accountData[entry.Hash] = entry.Blob
    }
    var storage []journalStorage
    if err := r.Decode(&storage); err != nil {
        return nil, fmt.Errorf("load diff storage: %v", err)
    }
    storageData := make(map[common.Hash]map[common.Hash][]byte)
    for _, entry := range storage {
        slots := make(map[common.Hash][]byte)
        for i, key := range entry.Keys {
            slots[key] = entry.Vals[i]
        }
        storageData[entry.Hash] = slots
    }
    return loadDiffLayer(newDiffLayer(parent, root, accountData, storageData), r)
}

// Journal terminates any in-progress snapshot generation, opens the journal file
// at the given path, persists the generator progress marker and returns the open
// file so subsequent layers know where to write to.
func (dl *diskLayer) Journal(path string) (io.WriteCloser, common.Hash, error) {
    // If the snapshot is currently being generated, abort it
    var stats *generatorStats
    if dl.genAbort != nil {
        abort := make(chan *generatorStats)
        dl.genAbort <- abort

        if stats = <-abort; stats != nil {
            stats.Log("Journalling in-progress snapshot", dl.genMarker)
        }
    }
    // Ensure the layer didn't get stale
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        return nil, common.Hash{}, ErrSnapshotStale
    }
    // We've reached the bottom, open the journal
    file, err := os.Create(path)
    if err != nil {
        return nil, common.Hash{}, err
    }
    // Write out the generator marker
    entry := journalGenerator{
        Done:   dl.genMarker == nil,
        Marker: dl.genMarker,
    }
    if stats != nil {
        entry.Wiping = (stats.wiping != nil)
        entry.Accounts = stats.accounts
        entry.Slots = stats.slots
        entry.Storage = uint64(stats.storage)
    }
    if err := rlp.Encode(file, entry); err != nil {
        file.Close()
        return nil, common.Hash{}, err
    }
    return file, dl.root, nil
}

// Journal writes the ancestry of this diff layer into the journal file first,
// then appends this layer's own content, returning the open file and the base
// disk layer root so subsequent layers know where to write to.
func (dl *diffLayer) Journal(path string) (io.WriteCloser, common.Hash, error) {
    // Journal the parent first
    writer, base, err := dl.parent.Journal(path)
    if err != nil {
        return nil, common.Hash{}, err
    }
    // Ensure the layer didn't get stale
    dl.lock.RLock()
    defer dl.lock.RUnlock()

    if dl.stale {
        writer.Close()
        return nil, common.Hash{}, ErrSnapshotStale
    }
    // Everything below was journalled, persist this layer too
    buf := bufio.NewWriter(writer)
    if err := rlp.Encode(buf, dl.root); err != nil {
        buf.Flush()
        writer.Close()
        return nil, common.Hash{}, err
    }
    accounts := make([]journalAccount, 0, len(dl.accountData))
    for hash, blob := range dl.accountData {
        accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
    }
    if err := rlp.Encode(buf, accounts); err != nil {
        buf.Flush()
        writer.Close()
        return nil, common.Hash{}, err
    }
    storage := make([]journalStorage, 0, len(dl.storageData))
    for hash, slots := range dl.storageData {
        keys := make([]common.Hash, 0, len(slots))
        vals := make([][]byte, 0, len(slots))
        for key, val := range slots {
            keys = append(keys, key)
            vals = append(vals, val)
        }
        storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
    }
    if err := rlp.Encode(buf, storage); err != nil {
        buf.Flush()
        writer.Close()
        return nil, common.Hash{}, err
    }
    buf.Flush()
    return writer, base, nil
}
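Putting the new pieces together, a journal file now starts with a single journalGenerator entry followed by repeated (root, accounts, storage) sections, one per diff layer. Below is a rough standalone reader sketch under that assumption; it is not part of the commit, the file path is an assumed placeholder and the struct copies are local duplicates for illustration.

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
)

// Local copies of the journal entry shapes above, for illustration only.
type journalGenerator struct {
    Wiping   bool
    Done     bool
    Marker   []byte
    Accounts uint64
    Slots    uint64
    Storage  uint64
}

type journalAccount struct {
    Hash common.Hash
    Blob []byte
}

type journalStorage struct {
    Hash common.Hash
    Keys []common.Hash
    Vals [][]byte
}

func main() {
    file, err := os.Open("snapshot.journal") // assumed path
    if err != nil {
        panic(err)
    }
    defer file.Close()

    r := rlp.NewStream(file, 0)

    // The journal always starts with the generator progress entry.
    var gen journalGenerator
    if err := r.Decode(&gen); err != nil {
        panic(err)
    }
    fmt.Printf("generation done=%v marker=%x\n", gen.Done, gen.Marker)

    // Each diff layer follows as three consecutive RLP values.
    for {
        var root common.Hash
        if err := r.Decode(&root); err != nil {
            if err == io.EOF {
                break
            }
            panic(err)
        }
        var accounts []journalAccount
        if err := r.Decode(&accounts); err != nil {
            panic(err)
        }
        var storage []journalStorage
        if err := r.Decode(&storage); err != nil {
            panic(err)
        }
        fmt.Printf("diff layer %x: %d accounts, %d storage entries\n", root, len(accounts), len(storage))
    }
}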
@@ -0,0 +1,130 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "bytes"
    "time"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/log"
)

// wipeSnapshot starts a goroutine to iterate over the entire key-value database
// and delete all the data associated with the snapshot (accounts, storage,
// metadata). After all is done, the snapshot range of the database is compacted
// to free up unused data blocks.
func wipeSnapshot(db ethdb.KeyValueStore, full bool) chan struct{} {
    // Wipe the snapshot root marker synchronously
    if full {
        rawdb.DeleteSnapshotRoot(db)
    }
    // Wipe everything else asynchronously
    wiper := make(chan struct{}, 1)
    go func() {
        if err := wipeContent(db); err != nil {
            log.Error("Failed to wipe state snapshot", "err", err) // Database close will trigger this
            return
        }
        close(wiper)
    }()
    return wiper
}

// wipeContent iterates over the entire key-value database and deletes all the
// data associated with the snapshot (accounts, storage), but not the root hash
// as the wiper is meant to run on a background thread but the root needs to be
// removed in sync to avoid data races. After all is done, the snapshot range of
// the database is compacted to free up unused data blocks.
func wipeContent(db ethdb.KeyValueStore) error {
    if err := wipeKeyRange(db, "accounts", rawdb.SnapshotAccountPrefix, len(rawdb.SnapshotAccountPrefix)+common.HashLength); err != nil {
        return err
    }
    if err := wipeKeyRange(db, "storage", rawdb.SnapshotStoragePrefix, len(rawdb.SnapshotStoragePrefix)+2*common.HashLength); err != nil {
        return err
    }
    // Compact the snapshot section of the database to get rid of unused space
    start := time.Now()

    log.Info("Compacting snapshot account area")
    end := common.CopyBytes(rawdb.SnapshotAccountPrefix)
    end[len(end)-1]++

    if err := db.Compact(rawdb.SnapshotAccountPrefix, end); err != nil {
        return err
    }
    log.Info("Compacting snapshot storage area")
    end = common.CopyBytes(rawdb.SnapshotStoragePrefix)
    end[len(end)-1]++

    if err := db.Compact(rawdb.SnapshotStoragePrefix, end); err != nil {
        return err
    }
    log.Info("Compacted snapshot area in database", "elapsed", common.PrettyDuration(time.Since(start)))

    return nil
}

// wipeKeyRange deletes a range of keys from the database starting with prefix
// and having a specific total key length.
func wipeKeyRange(db ethdb.KeyValueStore, kind string, prefix []byte, keylen int) error {
    // Batch deletions together to avoid holding an iterator for too long
    var (
        batch = db.NewBatch()
        items int
    )
    // Iterate over the key-range and delete all of them
    start, logged := time.Now(), time.Now()

    it := db.NewIteratorWithStart(prefix)
    for it.Next() {
        // Skip any keys with the correct prefix but wrong length (trie nodes)
        key := it.Key()
        if !bytes.HasPrefix(key, prefix) {
            break
        }
        if len(key) != keylen {
            continue
        }
        // Delete the key and periodically recreate the batch and iterator
        batch.Delete(key)
        items++

        if items%10000 == 0 {
            // Batch too large (or iterator too long lived), flush and recreate
            it.Release()
            if err := batch.Write(); err != nil {
                return err
            }
            batch.Reset()
            it = db.NewIteratorWithStart(key)

            if time.Since(logged) > 8*time.Second {
                log.Info("Deleting state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
                logged = time.Now()
            }
        }
    }
    it.Release()
    if err := batch.Write(); err != nil {
        return err
    }
    log.Info("Deleted state snapshot leftovers", "kind", kind, "wiped", items, "elapsed", common.PrettyDuration(time.Since(start)))
    return nil
}
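A small usage sketch for the wiper follows. It is assumed to live in a _test.go file inside the snapshot package (wipeSnapshot is unexported) and reuses only calls already exercised by the tests above: seed a memory database with one account snapshot entry plus the root marker, wipe everything and verify the entry is gone.

package snapshot

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func ExampleWipe() {
    db := memorydb.New()

    // Seed a single account entry and the snapshot root marker.
    acc := common.Hash{0x01}
    rawdb.WriteAccountSnapshot(db, acc, acc[:])
    rawdb.WriteSnapshotRoot(db, common.Hash{0xff})

    // Wipe everything, including the root marker, and block until the
    // asynchronous content wipe signals completion by closing the channel.
    <-wipeSnapshot(db, true)

    fmt.Println(rawdb.ReadAccountSnapshot(db, acc) == nil)
    // Output: true
}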