eth: fix megacheck warnings

Branch: pull/14933/head
Author: Egon Elbre, 7 years ago
Parent: 971079822e
Commit: 8f06b7980d
7 changed files:
  1. eth/api.go (20 changed lines)
  2. eth/downloader/downloader_test.go (19 changed lines)
  3. eth/filters/api.go (1 changed line)
  4. eth/filters/filter.go (3 changed lines)
  5. eth/filters/filter_system.go (1 changed line)
  6. eth/filters/filter_system_test.go (7 changed lines)
  7. eth/sync.go (6 changed lines)
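
megacheck bundles the staticcheck, gosimple and unused linters; the hunks below address its findings by deleting dead code (the callmsg helper and unused struct fields), merging a split variable declaration, replacing t.Fatalf/t.Errorf calls made from spawned goroutines with panics, and swapping time.Tick for a stoppable time.NewTicker.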

eth/api.go
@@ -465,26 +465,6 @@ func (api *PrivateDebugAPI) traceBlock(block *types.Block, logConfig *vm.LogConf
 	return true, structLogger.StructLogs(), nil
 }
-
-// callmsg is the message type used for call transitions.
-type callmsg struct {
-	addr          common.Address
-	to            *common.Address
-	gas, gasPrice *big.Int
-	value         *big.Int
-	data          []byte
-}
-
-// accessor boilerplate to implement core.Message
-func (m callmsg) From() (common.Address, error) { return m.addr, nil }
-func (m callmsg) FromFrontier() (common.Address, error) { return m.addr, nil }
-func (m callmsg) Nonce() uint64 { return 0 }
-func (m callmsg) CheckNonce() bool { return false }
-func (m callmsg) To() *common.Address { return m.to }
-func (m callmsg) GasPrice() *big.Int { return m.gasPrice }
-func (m callmsg) Gas() *big.Int { return m.gas }
-func (m callmsg) Value() *big.Int { return m.value }
-func (m callmsg) Data() []byte { return m.data }
 
 // formatError formats a Go error into either an empty string or the data content
 // of the error itself.
 func formatError(err error) string {
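
The callmsg type and its core.Message accessors had no remaining references in the package, which is the kind of dead code megacheck's unused checker reports; the fix is simply to delete the declarations. A minimal sketch of such a finding, with a hypothetical type name:

	package example

	// deadConfig is never referenced anywhere in the package, so the unused
	// checker reports it (roughly: "deadConfig is unused (U1000)"), and its
	// methods along with it.
	type deadConfig struct {
		retries int
	}

	func (c deadConfig) Retries() int { return c.retries }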

eth/downloader/downloader_test.go
@@ -403,8 +403,7 @@ func (dl *downloadTester) newSlowPeer(id string, version int, hashes []common.Ha
 	dl.lock.Lock()
 	defer dl.lock.Unlock()
 
-	var err error
-	err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
+	var err = dl.downloader.RegisterPeer(id, version, &downloadTesterPeer{dl, id, delay})
 	if err == nil {
 		// Assign the owned hashes, headers and blocks to the peer (deep copy)
 		dl.peerHashes[id] = make([]common.Hash, len(hashes))
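
Splitting a variable declaration from its immediate assignment is one of the patterns gosimple (part of megacheck, check S1021) suggests merging, which is all this hunk does. A small self-contained illustration, with hypothetical names:

	package example

	import "errors"

	func doWork() error { return errors.New("not implemented") }

	func register() error {
		// Flagged form:
		//   var err error
		//   err = doWork()
		// Merged form, as in the hunk above:
		var err = doWork()
		return err
	}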
@@ -1381,7 +1380,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-half", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1398,7 +1397,7 @@ func testSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("peer-full", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1454,7 +1453,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork A", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1474,7 +1473,7 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("fork B", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1535,7 +1534,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("faulty", nil, mode); err == nil {
-			t.Fatalf("succeeded faulty synchronisation")
+			panic("succeeded faulty synchronisation")
 		}
 	}()
 	<-starting
@@ -1552,7 +1551,7 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
@@ -1613,7 +1612,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("attack", nil, mode); err == nil {
-			t.Fatalf("succeeded attacker synchronisation")
+			panic("succeeded attacker synchronisation")
 		}
 	}()
 	<-starting
@@ -1630,7 +1629,7 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) {
 	go func() {
 		defer pending.Done()
 		if err := tester.sync("valid", nil, mode); err != nil {
-			t.Fatalf("failed to synchronise blocks: %v", err)
+			panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
 		}
 	}()
 	<-starting
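
These test hunks all follow one pattern: t.Fatalf was being called from goroutines spawned by the test, but Fatalf calls FailNow, which must only run on the goroutine executing the test function; megacheck reports this (staticcheck's SA2002 check), so the goroutines panic instead, which still aborts the run with the message. A rough self-contained sketch of the pattern, with hypothetical helpers:

	package example

	import (
		"fmt"
		"testing"
	)

	// syncPeer is a hypothetical stand-in for tester.sync in the hunks above.
	func syncPeer(id string) error { return nil }

	func TestAsyncSync(t *testing.T) {
		done := make(chan struct{})
		go func() {
			defer close(done)
			if err := syncPeer("peer-half"); err != nil {
				// Not t.Fatalf: FailNow may not be called off the test goroutine.
				panic(fmt.Sprintf("failed to synchronise blocks: %v", err))
			}
		}()
		<-done
	}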

eth/filters/api.go
@@ -54,7 +54,6 @@ type PublicFilterAPI struct {
 	backend   Backend
 	useMipMap bool
 	mux       *event.TypeMux
-	quit      chan struct{}
 	chainDb   ethdb.Database
 	events    *EventSystem
 	filtersMu sync.Mutex

eth/filters/filter.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"math"
 	"math/big"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/core"
@@ -42,8 +41,6 @@ type Filter struct {
 	backend    Backend
 	useMipMap  bool
 
-	created    time.Time
-
 	db         ethdb.Database
 	begin, end int64
 	addresses  []common.Address

eth/filters/filter_system.go
@@ -74,7 +74,6 @@ type subscription struct {
 // subscription which match the subscription criteria.
 type EventSystem struct {
 	mux       *event.TypeMux
-	sub       *event.TypeMuxSubscription
 	backend   Backend
 	lightMode bool
 	lastHead  *types.Header

eth/filters/filter_system_test.go
@@ -18,6 +18,7 @@ package filters
 import (
 	"context"
+	"fmt"
 	"math/big"
 	"reflect"
 	"testing"
@@ -439,15 +440,15 @@ func TestPendingLogsSubscription(t *testing.T) {
 			}
 			if len(fetched) != len(tt.expected) {
-				t.Fatalf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched))
+				panic(fmt.Sprintf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)))
 			}
 
 			for l := range fetched {
 				if fetched[l].Removed {
-					t.Errorf("expected log not to be removed for log %d in case %d", l, i)
+					panic(fmt.Sprintf("expected log not to be removed for log %d in case %d", l, i))
 				}
 				if !reflect.DeepEqual(fetched[l], tt.expected[l]) {
-					t.Errorf("invalid log on index %d for case %d", l, i)
+					panic(fmt.Sprintf("invalid log on index %d for case %d", l, i))
 				}
 			}
 		}()

eth/sync.go
@@ -138,7 +138,9 @@ func (pm *ProtocolManager) syncer() {
 	defer pm.downloader.Terminate()
 
 	// Wait for different events to fire synchronisation operations
-	forceSync := time.Tick(forceSyncCycle)
+	forceSync := time.NewTicker(forceSyncCycle)
+	defer forceSync.Stop()
+
 	for {
 		select {
 		case <-pm.newPeerCh:
@@ -148,7 +150,7 @@ func (pm *ProtocolManager) syncer() {
 			}
 			go pm.synchronise(pm.peers.BestPeer())
 
-		case <-forceSync:
+		case <-forceSync.C:
 			// Force a sync even if not enough peers are present
 			go pm.synchronise(pm.peers.BestPeer())
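
time.Tick returns a channel whose underlying Ticker can never be stopped, so the resource leaks; that is presumably what megacheck flagged here, and the fix switches to a time.NewTicker with a deferred Stop, reading from its C field as above. A compact sketch of the same pattern, with hypothetical names:

	package example

	import "time"

	// pollLoop mirrors the syncer change: a ticker that is stopped on exit
	// instead of a leaked time.Tick channel.
	func pollLoop(quit <-chan struct{}, poll func()) {
		forceSync := time.NewTicker(10 * time.Second)
		defer forceSync.Stop()

		for {
			select {
			case <-forceSync.C:
				poll()
			case <-quit:
				return
			}
		}
	}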
