go-ethereum/swarm/storage/localstore/localstore_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package localstore
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"math/rand"
"os"
"runtime"
"sort"
"sync"
"testing"
"time"
"github.com/ethereum/go-ethereum/swarm/chunk"
"github.com/ethereum/go-ethereum/swarm/shed"
"github.com/syndtr/goleveldb/leveldb"
)
func init() {
// Some of the tests in the localstore package rely on the order in which
// chunks are put or accessed matching the order of items in indexes that
// contain StoreTimestamp or AccessTimestamp in their keys. If the
// StoreTimestamp or AccessTimestamp is the same for two or more sequential
// items, their order in the database falls back to the chunk address value,
// which may differ from the order of chunks stored in a test slice. To
// ensure the same ordering on such indexes on Windows systems, where the
// clock resolution is coarser, an additional short sleep is added to
// the now function.
if runtime.GOOS == "windows" {
setNow(func() int64 {
time.Sleep(time.Microsecond)
return time.Now().UTC().UnixNano()
})
}
}
// TestDB validates that a chunk can be uploaded and
// correctly retrieved.
func TestDB(t *testing.T) {
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
ch := generateTestRandomChunk()
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
got, err := db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(got.Address(), ch.Address()) {
t.Errorf("got address %x, want %x", got.Address(), ch.Address())
}
if !bytes.Equal(got.Data(), ch.Data()) {
t.Errorf("got data %x, want %x", got.Data(), ch.Data())
}
}
// TestDB_updateGCSem tests the maxParallelUpdateGC limit.
// This test temporarily sets the limit to a low number,
// makes updateGC function execution take longer by
// setting a custom testHookUpdateGC function with a sleep,
// and counts the current and maximal number of goroutines.
func TestDB_updateGCSem(t *testing.T) {
updateGCSleep := time.Second
var count int
var max int
var mu sync.Mutex
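// setTestHookUpdateGC installs the hook and returns a reset function; the
// trailing () in the defer statement below ensures the original hook is
// restored when the test finishes. The hook only measures how many updateGC
// goroutines run concurrently.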
defer setTestHookUpdateGC(func() {
mu.Lock()
// add to the count of current goroutines
count++
if count > max {
// set the maximal detected number of goroutines
max = count
}
mu.Unlock()
// wait for some time to ensure multiple parallel goroutines
time.Sleep(updateGCSleep)
mu.Lock()
count--
mu.Unlock()
})()
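// restore the previous maxParallelUpdateGC value when the test finishes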
defer func(m int) { maxParallelUpdateGC = m }(maxParallelUpdateGC)
maxParallelUpdateGC = 3
db, cleanupFunc := newTestDB(t, nil)
defer cleanupFunc()
ch := generateTestRandomChunk()
_, err := db.Put(context.Background(), chunk.ModePutUpload, ch)
if err != nil {
t.Fatal(err)
}
// get more chunks than maxParallelUpdateGC
// in a time shorter than updateGCSleep
for i := 0; i < 5; i++ {
_, err = db.Get(context.Background(), chunk.ModeGetRequest, ch.Address())
if err != nil {
t.Fatal(err)
}
}
if max != maxParallelUpdateGC {
t.Errorf("got max %v, want %v", max, maxParallelUpdateGC)
}
}
// newTestDB is a helper function that constructs a
// temporary database and returns a cleanup function that must
// be called to remove the data.
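//
// A typical use mirrors TestDB above (illustrative sketch):
//
//	db, cleanupFunc := newTestDB(t, nil)
//	defer cleanupFunc()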
func newTestDB(t testing.TB, o *Options) (db *DB, cleanupFunc func()) {
t.Helper()
dir, err := ioutil.TempDir("", "localstore-test")
if err != nil {
t.Fatal(err)
}
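// until the database is constructed, cleanup only needs to remove the
// temporary directory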
cleanupFunc = func() { os.RemoveAll(dir) }
baseKey := make([]byte, 32)
if _, err := rand.Read(baseKey); err != nil {
t.Fatal(err)
}
db, err = New(dir, baseKey, o)
if err != nil {
cleanupFunc()
t.Fatal(err)
}
cleanupFunc = func() {
err := db.Close()
if err != nil {
t.Error(err)
}
os.RemoveAll(dir)
}
return db, cleanupFunc
}
func init() {
// needed for generateTestRandomChunk
rand.Seed(time.Now().UnixNano())
}
// generateTestRandomChunk generates a Chunk that is not
// valid, but it contains a random key and a random value.
// This function is faster than storage.generateTestRandomChunk
// which generates a valid chunk.
// Some tests in this package do not need valid chunks, just
// random data, and their execution time can be decreased
// using this function.
func generateTestRandomChunk() chunk.Chunk {
data := make([]byte, chunk.DefaultSize)
rand.Read(data)
key := make([]byte, 32)
rand.Read(key)
return chunk.NewChunk(key, data)
}
// TestGenerateTestRandomChunk validates that
// generateTestRandomChunk returns random data by comparing
// two generated chunks.
func TestGenerateTestRandomChunk(t *testing.T) {
c1 := generateTestRandomChunk()
c2 := generateTestRandomChunk()
addrLen := len(c1.Address())
if addrLen != 32 {
t.Errorf("first chunk address length %v, want %v", addrLen, 32)
}
dataLen := len(c1.Data())
if dataLen != chunk.DefaultSize {
t.Errorf("first chunk data length %v, want %v", dataLen, chunk.DefaultSize)
}
addrLen = len(c2.Address())
if addrLen != 32 {
t.Errorf("second chunk address length %v, want %v", addrLen, 32)
}
dataLen = len(c2.Data())
if dataLen != chunk.DefaultSize {
t.Errorf("second chunk data length %v, want %v", dataLen, chunk.DefaultSize)
}
if bytes.Equal(c1.Address(), c2.Address()) {
t.Error("fake chunks addresses do not differ")
}
if bytes.Equal(c1.Data(), c2.Data()) {
t.Error("fake chunks data bytes do not differ")
}
}
// newRetrieveIndexesTest returns a test function that validates if the right
// chunk values are in the retrieval indexes.
func newRetrieveIndexesTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.retrievalDataIndex.Get(addressToItem(chunk.Address()))
if err != nil {
t.Fatal(err)
}
validateItem(t, item, chunk.Address(), chunk.Data(), storeTimestamp, 0)
// access index should not be set
wantErr := leveldb.ErrNotFound
item, err = db.retrievalAccessIndex.Get(addressToItem(chunk.Address()))
if err != wantErr {
t.Errorf("got error %v, want %v", err, wantErr)
}
}
}
// newRetrieveIndexesTestWithAccess returns a test function that validates if the right
// chunk values are in the retrieval indexes when access time must be stored.
func newRetrieveIndexesTestWithAccess(db *DB, ch chunk.Chunk, storeTimestamp, accessTimestamp int64) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.retrievalDataIndex.Get(addressToItem(ch.Address()))
if err != nil {
t.Fatal(err)
}
validateItem(t, item, ch.Address(), ch.Data(), storeTimestamp, 0)
if accessTimestamp > 0 {
item, err = db.retrievalAccessIndex.Get(addressToItem(ch.Address()))
if err != nil {
t.Fatal(err)
}
validateItem(t, item, ch.Address(), nil, 0, accessTimestamp)
}
}
}
// newPullIndexTest returns a test function that validates if the right
// chunk values are in the pull index.
func newPullIndexTest(db *DB, ch chunk.Chunk, binID uint64, wantError error) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.pullIndex.Get(shed.Item{
Address: ch.Address(),
BinID: binID,
})
if err != wantError {
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, ch.Address(), nil, 0, 0)
}
}
}
// newPushIndexTest returns a test function that validates if the right
// chunk values are in the push index.
func newPushIndexTest(db *DB, ch chunk.Chunk, storeTimestamp int64, wantError error) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.pushIndex.Get(shed.Item{
Address: ch.Address(),
StoreTimestamp: storeTimestamp,
})
if err != wantError {
t.Errorf("got error %v, want %v", err, wantError)
}
if err == nil {
validateItem(t, item, ch.Address(), nil, storeTimestamp, 0)
}
}
}
// newGCIndexTest returns a test function that validates if the right
// chunk values are in the GC index.
func newGCIndexTest(db *DB, chunk chunk.Chunk, storeTimestamp, accessTimestamp int64, binID uint64) func(t *testing.T) {
return func(t *testing.T) {
item, err := db.gcIndex.Get(shed.Item{
Address: chunk.Address(),
BinID: binID,
AccessTimestamp: accessTimestamp,
})
if err != nil {
t.Fatal(err)
}
validateItem(t, item, chunk.Address(), nil, 0, accessTimestamp)
}
}
// newItemsCountTest returns a test function that validates if
// an index contains the expected number of key/value pairs.
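//
// Helpers in this file return functions with the func(t *testing.T) signature
// so they can be composed as named subtests. An illustrative sketch, assuming
// a db and an uploaded chunk ch from newTestDB and db.Put (the subtest names,
// wantTimestamp and the expected count are hypothetical):
//
//	t.Run("retrieve indexes", newRetrieveIndexesTest(db, ch, wantTimestamp, 0))
//	t.Run("pull index count", newItemsCountTest(db.pullIndex, 1))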
func newItemsCountTest(i shed.Index, want int) func(t *testing.T) {
return func(t *testing.T) {
var c int
err := i.Iterate(func(item shed.Item) (stop bool, err error) {
c++
return
}, nil)
if err != nil {
t.Fatal(err)
}
if c != want {
t.Errorf("got %v items in index, want %v", c, want)
}
}
}
// newIndexGCSizeTest returns a test function that validates if DB.gcSize
// value is the same as the number of items in DB.gcIndex.
func newIndexGCSizeTest(db *DB) func(t *testing.T) {
return func(t *testing.T) {
var want uint64
err := db.gcIndex.Iterate(func(item shed.Item) (stop bool, err error) {
want++
return
}, nil)
if err != nil {
t.Fatal(err)
}
got, err := db.gcSize.Get()
if err != nil {
t.Fatal(err)
}
if got != want {
t.Errorf("got gc size %v, want %v", got, want)
}
}
}
// testIndexChunk embeds chunk.Chunk with additional data that is stored
// in the database. It is used for index value validations.
type testIndexChunk struct {
chunk.Chunk
binID uint64
}
// testItemsOrder tests the order of chunks in the index. If sortFunc is not nil,
// chunks will be sorted with it before validation.
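//
// A caller can, for example, sort the expected chunks by bin ID before
// validating the pull index (illustrative sketch; the chunks slice is assumed
// to be populated by the calling test):
//
//	testItemsOrder(t, db.pullIndex, chunks, func(i, j int) (less bool) {
//		return chunks[i].binID < chunks[j].binID
//	})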
func testItemsOrder(t *testing.T, i shed.Index, chunks []testIndexChunk, sortFunc func(i, j int) (less bool)) {
newItemsCountTest(i, len(chunks))(t)
if sortFunc != nil {
sort.Slice(chunks, sortFunc)
}
var cursor int
err := i.Iterate(func(item shed.Item) (stop bool, err error) {
want := chunks[cursor].Address()
got := item.Address
if !bytes.Equal(got, want) {
return true, fmt.Errorf("got address %x at position %v, want %x", got, cursor, want)
}
cursor++
return false, nil
}, nil)
if err != nil {
t.Fatal(err)
}
}
// validateItem is a helper function that checks Item values.
func validateItem(t *testing.T, item shed.Item, address, data []byte, storeTimestamp, accessTimestamp int64) {
t.Helper()
if !bytes.Equal(item.Address, address) {
t.Errorf("got item address %x, want %x", item.Address, address)
}
if !bytes.Equal(item.Data, data) {
t.Errorf("got item data %x, want %x", item.Data, data)
}
if item.StoreTimestamp != storeTimestamp {
t.Errorf("got item store timestamp %v, want %v", item.StoreTimestamp, storeTimestamp)
}
if item.AccessTimestamp != accessTimestamp {
t.Errorf("got item access timestamp %v, want %v", item.AccessTimestamp, accessTimestamp)
}
}
// setNow replaces the now function and
// returns a function that will reset it to the
// value before the change.
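//
// A typical use installs a deterministic clock for the duration of a test and
// restores the previous now function with the returned reset (illustrative
// sketch; the fixed timestamp is arbitrary):
//
//	defer setNow(func() int64 {
//		return 1546300800000000000 // 2019-01-01 00:00:00 UTC in nanoseconds
//	})()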
func setNow(f func() int64) (reset func()) {
current := now
reset = func() { now = current }
now = f
return reset
}
// TestSetNow tests if the setNow function changes the now function
// correctly and if its reset function restores the original function.
func TestSetNow(t *testing.T) {
// restore the current now function after the test finishes
defer func(f func() int64) { now = f }(now)
// expected value for the unchanged function
var original int64 = 1
// expected value for the changed function
var changed int64 = 2
// define the original (unchanged) function
now = func() int64 {
return original
}
// get the time
got := now()
// test if got variable is set correctly
if got != original {
t.Errorf("got now value %v, want %v", got, original)
}
// set the new function
reset := setNow(func() int64 {
return changed
})
// get the time
got = now()
// test if got variable is set correctly to changed value
if got != changed {
t.Errorf("got hook value %v, want %v", got, changed)
}
// set the function to the original one
reset()
// get the time
got = now()
// test if got variable is set correctly to original value
if got != original {
t.Errorf("got hook value %v, want %v", got, original)
}
}