From 35a7dcb162546f7f31cb6492f716cb93159218d7 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 6 Jan 2017 15:52:03 +0100 Subject: [PATCH 1/3] all: gofmt -w -s --- accounts/abi/abi_test.go | 28 ++-- build/ci.go | 2 +- cmd/swarm/main.go | 6 +- common/bytes.go | 2 +- common/math/dist_test.go | 36 ++--- core/blockchain.go | 2 +- core/blockchain_test.go | 2 +- core/state/iterator_test.go | 2 +- core/state/managed_state_test.go | 6 +- core/state/statedb.go | 6 +- core/state/sync_test.go | 4 +- core/tx_list.go | 2 +- core/tx_pool.go | 2 +- core/vm/jump_table.go | 260 +++++++++++++++--------------- crypto/ecies/asn1.go | 12 +- crypto/ecies/ecies_test.go | 6 +- crypto/sha3/sha3_test.go | 2 +- eth/backend_test.go | 4 +- eth/downloader/peer.go | 4 +- eth/downloader/queue.go | 4 +- eth/filters/api.go | 2 +- eth/filters/filter_system_test.go | 22 +-- eth/filters/filter_test.go | 12 +- eth/handler_test.go | 12 +- eth/protocol_test.go | 2 +- ethdb/memory_database.go | 2 +- ethstats/ethstats.go | 14 +- event/event_test.go | 4 +- event/filter/generic_filter.go | 2 +- les/fetcher.go | 2 +- les/flowcontrol/manager.go | 6 +- les/handler_test.go | 14 +- les/peer.go | 2 +- les/randselect_test.go | 2 +- les/serverpool.go | 2 +- les/sync.go | 12 +- les/txrelay.go | 2 +- light/lightchain.go | 2 +- light/txpool_test.go | 2 +- logger/glog/glog.go | 2 +- miner/worker.go | 2 +- mobile/p2p.go | 2 +- node/node_test.go | 12 +- p2p/discover/database_test.go | 4 +- p2p/discover/table.go | 2 +- p2p/discover/table_test.go | 26 +-- p2p/discv5/database_test.go | 4 +- p2p/discv5/net_test.go | 26 +-- p2p/discv5/sim_test.go | 6 +- p2p/discv5/ticket.go | 2 +- p2p/discv5/topic.go | 2 +- p2p/nat/natpmp.go | 2 +- p2p/peer_test.go | 2 +- rpc/json.go | 8 +- swarm/storage/dpa_test.go | 4 +- trie/encoding.go | 2 +- trie/iterator_test.go | 2 +- trie/sync_test.go | 4 +- whisper/whisperv2/filter.go | 2 +- whisper/whisperv2/peer.go | 2 +- whisper/whisperv2/peer_test.go | 2 +- whisper/whisperv2/topic_test.go | 52 +++--- whisper/whisperv5/peer.go | 2 +- whisper/whisperv5/whisper.go | 2 +- 64 files changed, 343 insertions(+), 343 deletions(-) diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index b1bfcb012a..1e5ee0efe5 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -67,10 +67,10 @@ func TestTypeCheck(t *testing.T) { {"uint16[3]", [4]uint16{1, 2, 3}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"}, {"uint16[3]", []uint16{1, 2, 3}, ""}, {"uint16[3]", []uint16{1, 2, 3, 4}, "abi: cannot use [4]uint16 as type [3]uint16 as argument"}, - {"address[]", []common.Address{common.Address{1}}, ""}, - {"address[1]", []common.Address{common.Address{1}}, ""}, - {"address[1]", [1]common.Address{common.Address{1}}, ""}, - {"address[2]", [1]common.Address{common.Address{1}}, "abi: cannot use [1]array as type [2]array as argument"}, + {"address[]", []common.Address{{1}}, ""}, + {"address[1]", []common.Address{{1}}, ""}, + {"address[1]", [1]common.Address{{1}}, ""}, + {"address[2]", [1]common.Address{{1}}, "abi: cannot use [1]array as type [2]array as argument"}, {"bytes32", [32]byte{}, ""}, {"bytes32", [33]byte{}, "abi: cannot use [33]uint8 as type [32]uint8 as argument"}, {"bytes32", common.Hash{1}, ""}, @@ -80,7 +80,7 @@ func TestTypeCheck(t *testing.T) { {"bytes", [2]byte{0, 1}, ""}, {"bytes", common.Hash{1}, ""}, {"string", "hello world", ""}, - {"bytes32[]", [][32]byte{[32]byte{}}, ""}, + {"bytes32[]", [][32]byte{{}}, ""}, {"function", [24]byte{}, ""}, } { typ, err := NewType(test.typ) @@ -343,8 +343,8 
@@ func TestPack(t *testing.T) { {"uint16[]", []uint16{1, 2}, formatSliceOutput([]byte{1}, []byte{2})}, {"bytes20", [20]byte{1}, pad([]byte{1}, 32, false)}, {"uint256[]", []*big.Int{big.NewInt(1), big.NewInt(2)}, formatSliceOutput([]byte{1}, []byte{2})}, - {"address[]", []common.Address{common.Address{1}, common.Address{2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))}, - {"bytes32[]", []common.Hash{common.Hash{1}, common.Hash{2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))}, + {"address[]", []common.Address{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 20, false), pad([]byte{2}, 20, false))}, + {"bytes32[]", []common.Hash{{1}, {2}}, formatSliceOutput(pad([]byte{1}, 32, false), pad([]byte{2}, 32, false))}, {"function", [24]byte{1}, pad([]byte{1}, 32, false)}, } { typ, err := NewType(test.typ) @@ -458,12 +458,12 @@ func TestReader(t *testing.T) { Uint256, _ := NewType("uint256") exp := ABI{ Methods: map[string]Method{ - "balance": Method{ + "balance": { "balance", true, nil, nil, }, - "send": Method{ + "send": { "send", false, []Argument{ - Argument{"amount", Uint256, false}, + {"amount", Uint256, false}, }, nil, }, }, @@ -562,7 +562,7 @@ func TestTestSlice(t *testing.T) { func TestMethodSignature(t *testing.T) { String, _ := NewType("string") - m := Method{"foo", false, []Argument{Argument{"bar", String, false}, Argument{"baz", String, false}}, nil} + m := Method{"foo", false, []Argument{{"bar", String, false}, {"baz", String, false}}, nil} exp := "foo(string,string)" if m.Sig() != exp { t.Error("signature mismatch", exp, "!=", m.Sig()) @@ -574,7 +574,7 @@ func TestMethodSignature(t *testing.T) { } uintt, _ := NewType("uint") - m = Method{"foo", false, []Argument{Argument{"bar", uintt, false}}, nil} + m = Method{"foo", false, []Argument{{"bar", uintt, false}}, nil} exp = "foo(uint256)" if m.Sig() != exp { t.Error("signature mismatch", exp, "!=", m.Sig()) @@ -779,8 +779,8 @@ func TestBareEvents(t *testing.T) { "balance": {false, nil}, "anon": {true, nil}, "args": {false, []Argument{ - Argument{Name: "arg0", Type: arg0, Indexed: false}, - Argument{Name: "arg1", Type: arg1, Indexed: true}, + {Name: "arg0", Type: arg0, Indexed: false}, + {Name: "arg1", Type: arg1, Indexed: true}, }}, } diff --git a/build/ci.go b/build/ci.go index 593fcd1513..d530c24ca0 100644 --- a/build/ci.go +++ b/build/ci.go @@ -195,7 +195,7 @@ func doInstall(cmdline []string) { if err != nil { log.Fatal(err) } - for name, _ := range pkgs { + for name := range pkgs { if name == "main" { gobuild := goToolArch(*arch, "build", buildFlags(env)...) gobuild.Args = append(gobuild.Args, "-v") diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 954ad3b13b..71f53082ab 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -129,7 +129,7 @@ func init() { app.HideVersion = true // we have a command to print the version app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Commands = []cli.Command{ - cli.Command{ + { Action: version, Name: "version", Usage: "Print version numbers", @@ -138,7 +138,7 @@ func init() { The output of this command is supposed to be machine-readable. `, }, - cli.Command{ + { Action: upload, Name: "up", Usage: "upload a file or directory to swarm using the HTTP API", @@ -147,7 +147,7 @@ The output of this command is supposed to be machine-readable. 
"upload a file or directory to swarm using the HTTP API and prints the root hash", `, }, - cli.Command{ + { Action: hash, Name: "hash", Usage: "print the swarm hash of a file or directory", diff --git a/common/bytes.go b/common/bytes.go index b9fb3b2da6..cbceea8b5c 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -143,7 +143,7 @@ func Hex2BytesFixed(str string, flen int) []byte { return h } else { if len(h) > flen { - return h[len(h)-flen : len(h)] + return h[len(h)-flen:] } else { hh := make([]byte, flen) copy(hh[flen-len(h):flen], h[:]) diff --git a/common/math/dist_test.go b/common/math/dist_test.go index 826faea8b3..f5857b6f80 100644 --- a/common/math/dist_test.go +++ b/common/math/dist_test.go @@ -41,24 +41,24 @@ func TestSum(t *testing.T) { func TestDist(t *testing.T) { var vectors = []Vector{ - Vector{big.NewInt(1000), big.NewInt(1234)}, - Vector{big.NewInt(500), big.NewInt(10023)}, - Vector{big.NewInt(1034), big.NewInt(1987)}, - Vector{big.NewInt(1034), big.NewInt(1987)}, - Vector{big.NewInt(8983), big.NewInt(1977)}, - Vector{big.NewInt(98382), big.NewInt(1887)}, - Vector{big.NewInt(12398), big.NewInt(1287)}, - Vector{big.NewInt(12398), big.NewInt(1487)}, - Vector{big.NewInt(12398), big.NewInt(1987)}, - Vector{big.NewInt(12398), big.NewInt(128)}, - Vector{big.NewInt(12398), big.NewInt(1987)}, - Vector{big.NewInt(1398), big.NewInt(187)}, - Vector{big.NewInt(12328), big.NewInt(1927)}, - Vector{big.NewInt(12398), big.NewInt(1987)}, - Vector{big.NewInt(22398), big.NewInt(1287)}, - Vector{big.NewInt(1370), big.NewInt(1981)}, - Vector{big.NewInt(12398), big.NewInt(1957)}, - Vector{big.NewInt(42198), big.NewInt(1987)}, + {big.NewInt(1000), big.NewInt(1234)}, + {big.NewInt(500), big.NewInt(10023)}, + {big.NewInt(1034), big.NewInt(1987)}, + {big.NewInt(1034), big.NewInt(1987)}, + {big.NewInt(8983), big.NewInt(1977)}, + {big.NewInt(98382), big.NewInt(1887)}, + {big.NewInt(12398), big.NewInt(1287)}, + {big.NewInt(12398), big.NewInt(1487)}, + {big.NewInt(12398), big.NewInt(1987)}, + {big.NewInt(12398), big.NewInt(128)}, + {big.NewInt(12398), big.NewInt(1987)}, + {big.NewInt(1398), big.NewInt(187)}, + {big.NewInt(12328), big.NewInt(1927)}, + {big.NewInt(12398), big.NewInt(1987)}, + {big.NewInt(22398), big.NewInt(1287)}, + {big.NewInt(1370), big.NewInt(1981)}, + {big.NewInt(12398), big.NewInt(1957)}, + {big.NewInt(42198), big.NewInt(1987)}, } VectorsBy(GasSort).Sort(vectors) diff --git a/core/blockchain.go b/core/blockchain.go index 2081457a9e..3c9e1f7cbc 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -150,7 +150,7 @@ func NewBlockChain(chainDb ethdb.Database, config *params.ChainConfig, pow pow.P return nil, err } // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain - for hash, _ := range BadHashes { + for hash := range BadHashes { if header := bc.GetHeaderByHash(hash); header != nil { // get the canonical block corresponding to the offending header's number headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 25733ce085..a5a83ba609 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -1107,7 +1107,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) { chain, _ := GenerateChain(params.TestChainConfig, genesis, db, 10, func(i int, gen *BlockGen) {}) - for i, _ := range chain { + for i := range chain { go func(block *types.Block) { // try to retrieve a block by its canonical hash and see if the block data can be retrieved. 
for { diff --git a/core/state/iterator_test.go b/core/state/iterator_test.go index aa05c5dfe7..aa9c5b7288 100644 --- a/core/state/iterator_test.go +++ b/core/state/iterator_test.go @@ -41,7 +41,7 @@ func TestNodeIteratorCoverage(t *testing.T) { } } // Cross check the hashes and the database itself - for hash, _ := range hashes { + for hash := range hashes { if _, err := db.Get(hash.Bytes()); err != nil { t.Errorf("failed to retrieve reported node %x: %v", hash, err) } diff --git a/core/state/managed_state_test.go b/core/state/managed_state_test.go index 3f7bc2aa85..d9c232ebb9 100644 --- a/core/state/managed_state_test.go +++ b/core/state/managed_state_test.go @@ -52,7 +52,7 @@ func TestRemove(t *testing.T) { ms, account := create() nn := make([]bool, 10) - for i, _ := range nn { + for i := range nn { nn[i] = true } account.nonces = append(account.nonces, nn...) @@ -68,7 +68,7 @@ func TestReuse(t *testing.T) { ms, account := create() nn := make([]bool, 10) - for i, _ := range nn { + for i := range nn { nn[i] = true } account.nonces = append(account.nonces, nn...) @@ -84,7 +84,7 @@ func TestReuse(t *testing.T) { func TestRemoteNonceChange(t *testing.T) { ms, account := create() nn := make([]bool, 10) - for i, _ := range nn { + for i := range nn { nn[i] = true } account.nonces = append(account.nonces, nn...) diff --git a/core/state/statedb.go b/core/state/statedb.go index bbcde94433..75c40b364e 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -479,7 +479,7 @@ func (self *StateDB) Copy() *StateDB { logSize: self.logSize, } // Copy the dirty states and logs - for addr, _ := range self.stateObjectsDirty { + for addr := range self.stateObjectsDirty { state.stateObjects[addr] = self.stateObjects[addr].deepCopy(state, state.MarkStateObjectDirty) state.stateObjectsDirty[addr] = struct{}{} } @@ -530,7 +530,7 @@ func (self *StateDB) GetRefund() *big.Int { // It is called in between transactions to get the root hash that // goes into transaction receipts. func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { - for addr, _ := range s.stateObjectsDirty { + for addr := range s.stateObjectsDirty { stateObject := s.stateObjects[addr] if stateObject.suicided || (deleteEmptyObjects && stateObject.empty()) { s.deleteStateObject(stateObject) @@ -553,7 +553,7 @@ func (s *StateDB) DeleteSuicides() { // Reset refund so that any used-gas calculations can use this method. 
s.clearJournalAndRefund() - for addr, _ := range s.stateObjectsDirty { + for addr := range s.stateObjectsDirty { stateObject := s.stateObjects[addr] // If the object has been removed by a suicide diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 2a30c86f43..cb585f78c2 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -198,7 +198,7 @@ func testIterativeRandomStateSync(t *testing.T, batch int) { for len(queue) > 0 { // Fetch all the queued nodes in a random order results := make([]trie.SyncResult, 0, len(queue)) - for hash, _ := range queue { + for hash := range queue { data, err := srcDb.Get(hash.Bytes()) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", hash, err) @@ -235,7 +235,7 @@ func TestIterativeRandomDelayedStateSync(t *testing.T) { for len(queue) > 0 { // Sync only half of the scheduled nodes, even those in random order results := make([]trie.SyncResult, 0, len(queue)/2+1) - for hash, _ := range queue { + for hash := range queue { delete(queue, hash) data, err := srcDb.Get(hash.Bytes()) diff --git a/core/tx_list.go b/core/tx_list.go index c3ddf3148e..95831c83b1 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -110,7 +110,7 @@ func (m *txSortedMap) Filter(filter func(*types.Transaction) bool) types.Transac // If transactions were removed, the heap and cache are ruined if len(removed) > 0 { *m.index = make([]uint64, 0, len(m.items)) - for nonce, _ := range m.items { + for nonce := range m.items { *m.index = append(*m.index, nonce) } heap.Init(m.index) diff --git a/core/tx_pool.go b/core/tx_pool.go index c5421fa021..2e1e5c63ef 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -609,7 +609,7 @@ func (pool *TxPool) promoteExecutables(state *state.StateDB) { if queued > maxQueuedInTotal { // Sort all accounts with queued transactions by heartbeat addresses := make(addresssByHeartbeat, 0, len(pool.queue)) - for addr, _ := range pool.queue { + for addr := range pool.queue { addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]}) } sort.Sort(addresses) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index eb85ae6aff..f4ce818833 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -52,283 +52,283 @@ var defaultJumpTable = NewJumpTable() func NewJumpTable() [256]operation { return [256]operation{ - ADD: operation{ + ADD: { execute: opAdd, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - SUB: operation{ + SUB: { execute: opSub, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - MUL: operation{ + MUL: { execute: opMul, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - DIV: operation{ + DIV: { execute: opDiv, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - SDIV: operation{ + SDIV: { execute: opSdiv, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - MOD: operation{ + MOD: { execute: opMod, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - SMOD: operation{ + SMOD: { execute: opSmod, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - EXP: operation{ + EXP: { execute: opExp, gasCost: gasExp, validateStack: makeStackFunc(2, 1), valid: true, }, - SIGNEXTEND: operation{ + SIGNEXTEND: { execute: opSignExtend, gasCost: constGasFunc(GasFastStep), validateStack: makeStackFunc(2, 1), valid: true, }, - NOT: operation{ + 
NOT: { execute: opNot, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(1, 1), valid: true, }, - LT: operation{ + LT: { execute: opLt, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - GT: operation{ + GT: { execute: opGt, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - SLT: operation{ + SLT: { execute: opSlt, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - SGT: operation{ + SGT: { execute: opSgt, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - EQ: operation{ + EQ: { execute: opEq, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - ISZERO: operation{ + ISZERO: { execute: opIszero, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(1, 1), valid: true, }, - AND: operation{ + AND: { execute: opAnd, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - OR: operation{ + OR: { execute: opOr, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - XOR: operation{ + XOR: { execute: opXor, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - BYTE: operation{ + BYTE: { execute: opByte, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(2, 1), valid: true, }, - ADDMOD: operation{ + ADDMOD: { execute: opAddmod, gasCost: constGasFunc(GasMidStep), validateStack: makeStackFunc(3, 1), valid: true, }, - MULMOD: operation{ + MULMOD: { execute: opMulmod, gasCost: constGasFunc(GasMidStep), validateStack: makeStackFunc(3, 1), valid: true, }, - SHA3: operation{ + SHA3: { execute: opSha3, gasCost: gasSha3, validateStack: makeStackFunc(2, 1), memorySize: memorySha3, valid: true, }, - ADDRESS: operation{ + ADDRESS: { execute: opAddress, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - BALANCE: operation{ + BALANCE: { execute: opBalance, gasCost: gasBalance, validateStack: makeStackFunc(0, 1), valid: true, }, - ORIGIN: operation{ + ORIGIN: { execute: opOrigin, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - CALLER: operation{ + CALLER: { execute: opCaller, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - CALLVALUE: operation{ + CALLVALUE: { execute: opCallValue, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - CALLDATALOAD: operation{ + CALLDATALOAD: { execute: opCalldataLoad, gasCost: constGasFunc(GasFastestStep), validateStack: makeStackFunc(1, 1), valid: true, }, - CALLDATASIZE: operation{ + CALLDATASIZE: { execute: opCalldataSize, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - CALLDATACOPY: operation{ + CALLDATACOPY: { execute: opCalldataCopy, gasCost: gasCalldataCopy, validateStack: makeStackFunc(3, 1), memorySize: memoryCalldataCopy, valid: true, }, - CODESIZE: operation{ + CODESIZE: { execute: opCodeSize, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - EXTCODESIZE: operation{ + EXTCODESIZE: { execute: opExtCodeSize, gasCost: gasExtCodeSize, validateStack: makeStackFunc(1, 1), valid: true, }, - CODECOPY: operation{ + CODECOPY: { execute: opCodeCopy, gasCost: gasCodeCopy, validateStack: makeStackFunc(3, 0), memorySize: memoryCodeCopy, valid: true, }, - EXTCODECOPY: operation{ + 
EXTCODECOPY: { execute: opExtCodeCopy, gasCost: gasExtCodeCopy, validateStack: makeStackFunc(4, 0), memorySize: memoryExtCodeCopy, valid: true, }, - GASPRICE: operation{ + GASPRICE: { execute: opGasprice, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - BLOCKHASH: operation{ + BLOCKHASH: { execute: opBlockhash, gasCost: constGasFunc(GasExtStep), validateStack: makeStackFunc(1, 1), valid: true, }, - COINBASE: operation{ + COINBASE: { execute: opCoinbase, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - TIMESTAMP: operation{ + TIMESTAMP: { execute: opTimestamp, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - NUMBER: operation{ + NUMBER: { execute: opNumber, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - DIFFICULTY: operation{ + DIFFICULTY: { execute: opDifficulty, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - GASLIMIT: operation{ + GASLIMIT: { execute: opGasLimit, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - POP: operation{ + POP: { execute: opPop, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(1, 0), valid: true, }, - MLOAD: operation{ + MLOAD: { execute: opMload, gasCost: gasMLoad, validateStack: makeStackFunc(1, 1), memorySize: memoryMLoad, valid: true, }, - MSTORE: operation{ + MSTORE: { execute: opMstore, gasCost: gasMStore, validateStack: makeStackFunc(2, 0), memorySize: memoryMStore, valid: true, }, - MSTORE8: operation{ + MSTORE8: { execute: opMstore8, gasCost: gasMStore8, memorySize: memoryMStore8, @@ -336,71 +336,71 @@ func NewJumpTable() [256]operation { valid: true, }, - SLOAD: operation{ + SLOAD: { execute: opSload, gasCost: gasSLoad, validateStack: makeStackFunc(1, 1), valid: true, }, - SSTORE: operation{ + SSTORE: { execute: opSstore, gasCost: gasSStore, validateStack: makeStackFunc(2, 0), valid: true, }, - JUMPDEST: operation{ + JUMPDEST: { execute: opJumpdest, gasCost: constGasFunc(params.JumpdestGas), validateStack: makeStackFunc(0, 0), valid: true, }, - PC: operation{ + PC: { execute: opPc, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - MSIZE: operation{ + MSIZE: { execute: opMsize, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - GAS: operation{ + GAS: { execute: opGas, gasCost: constGasFunc(GasQuickStep), validateStack: makeStackFunc(0, 1), valid: true, }, - CREATE: operation{ + CREATE: { execute: opCreate, gasCost: gasCreate, validateStack: makeStackFunc(3, 1), memorySize: memoryCreate, valid: true, }, - CALL: operation{ + CALL: { execute: opCall, gasCost: gasCall, validateStack: makeStackFunc(7, 1), memorySize: memoryCall, valid: true, }, - CALLCODE: operation{ + CALLCODE: { execute: opCallCode, gasCost: gasCallCode, validateStack: makeStackFunc(7, 1), memorySize: memoryCall, valid: true, }, - DELEGATECALL: operation{ + DELEGATECALL: { execute: opDelegateCall, gasCost: gasDelegateCall, validateStack: makeStackFunc(6, 1), memorySize: memoryDelegateCall, valid: true, }, - RETURN: operation{ + RETURN: { execute: opReturn, gasCost: gasReturn, validateStack: makeStackFunc(2, 0), @@ -408,448 +408,448 @@ func NewJumpTable() [256]operation { halts: true, valid: true, }, - SUICIDE: operation{ + SUICIDE: { execute: opSuicide, gasCost: gasSuicide, validateStack: makeStackFunc(1, 0), halts: true, valid: true, }, - JUMP: 
operation{ + JUMP: { execute: opJump, gasCost: constGasFunc(GasMidStep), validateStack: makeStackFunc(1, 0), jumps: true, valid: true, }, - JUMPI: operation{ + JUMPI: { execute: opJumpi, gasCost: constGasFunc(GasSlowStep), validateStack: makeStackFunc(2, 0), jumps: true, valid: true, }, - STOP: operation{ + STOP: { execute: opStop, gasCost: constGasFunc(Zero), validateStack: makeStackFunc(0, 0), halts: true, valid: true, }, - LOG0: operation{ + LOG0: { execute: makeLog(0), gasCost: makeGasLog(0), validateStack: makeStackFunc(2, 0), memorySize: memoryLog, valid: true, }, - LOG1: operation{ + LOG1: { execute: makeLog(1), gasCost: makeGasLog(1), validateStack: makeStackFunc(3, 0), memorySize: memoryLog, valid: true, }, - LOG2: operation{ + LOG2: { execute: makeLog(2), gasCost: makeGasLog(2), validateStack: makeStackFunc(4, 0), memorySize: memoryLog, valid: true, }, - LOG3: operation{ + LOG3: { execute: makeLog(3), gasCost: makeGasLog(3), validateStack: makeStackFunc(5, 0), memorySize: memoryLog, valid: true, }, - LOG4: operation{ + LOG4: { execute: makeLog(4), gasCost: makeGasLog(4), validateStack: makeStackFunc(6, 0), memorySize: memoryLog, valid: true, }, - SWAP1: operation{ + SWAP1: { execute: makeSwap(1), gasCost: gasSwap, validateStack: makeStackFunc(2, 0), valid: true, }, - SWAP2: operation{ + SWAP2: { execute: makeSwap(2), gasCost: gasSwap, validateStack: makeStackFunc(3, 0), valid: true, }, - SWAP3: operation{ + SWAP3: { execute: makeSwap(3), gasCost: gasSwap, validateStack: makeStackFunc(4, 0), valid: true, }, - SWAP4: operation{ + SWAP4: { execute: makeSwap(4), gasCost: gasSwap, validateStack: makeStackFunc(5, 0), valid: true, }, - SWAP5: operation{ + SWAP5: { execute: makeSwap(5), gasCost: gasSwap, validateStack: makeStackFunc(6, 0), valid: true, }, - SWAP6: operation{ + SWAP6: { execute: makeSwap(6), gasCost: gasSwap, validateStack: makeStackFunc(7, 0), valid: true, }, - SWAP7: operation{ + SWAP7: { execute: makeSwap(7), gasCost: gasSwap, validateStack: makeStackFunc(8, 0), valid: true, }, - SWAP8: operation{ + SWAP8: { execute: makeSwap(8), gasCost: gasSwap, validateStack: makeStackFunc(9, 0), valid: true, }, - SWAP9: operation{ + SWAP9: { execute: makeSwap(9), gasCost: gasSwap, validateStack: makeStackFunc(10, 0), valid: true, }, - SWAP10: operation{ + SWAP10: { execute: makeSwap(10), gasCost: gasSwap, validateStack: makeStackFunc(11, 0), valid: true, }, - SWAP11: operation{ + SWAP11: { execute: makeSwap(11), gasCost: gasSwap, validateStack: makeStackFunc(12, 0), valid: true, }, - SWAP12: operation{ + SWAP12: { execute: makeSwap(12), gasCost: gasSwap, validateStack: makeStackFunc(13, 0), valid: true, }, - SWAP13: operation{ + SWAP13: { execute: makeSwap(13), gasCost: gasSwap, validateStack: makeStackFunc(14, 0), valid: true, }, - SWAP14: operation{ + SWAP14: { execute: makeSwap(14), gasCost: gasSwap, validateStack: makeStackFunc(15, 0), valid: true, }, - SWAP15: operation{ + SWAP15: { execute: makeSwap(15), gasCost: gasSwap, validateStack: makeStackFunc(16, 0), valid: true, }, - SWAP16: operation{ + SWAP16: { execute: makeSwap(16), gasCost: gasSwap, validateStack: makeStackFunc(17, 0), valid: true, }, - PUSH1: operation{ + PUSH1: { execute: makePush(1, big.NewInt(1)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH2: operation{ + PUSH2: { execute: makePush(2, big.NewInt(2)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH3: operation{ + PUSH3: { execute: makePush(3, big.NewInt(3)), gasCost: gasPush, validateStack: 
makeStackFunc(0, 1), valid: true, }, - PUSH4: operation{ + PUSH4: { execute: makePush(4, big.NewInt(4)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH5: operation{ + PUSH5: { execute: makePush(5, big.NewInt(5)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH6: operation{ + PUSH6: { execute: makePush(6, big.NewInt(6)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH7: operation{ + PUSH7: { execute: makePush(7, big.NewInt(7)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH8: operation{ + PUSH8: { execute: makePush(8, big.NewInt(8)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH9: operation{ + PUSH9: { execute: makePush(9, big.NewInt(9)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH10: operation{ + PUSH10: { execute: makePush(10, big.NewInt(10)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH11: operation{ + PUSH11: { execute: makePush(11, big.NewInt(11)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH12: operation{ + PUSH12: { execute: makePush(12, big.NewInt(12)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH13: operation{ + PUSH13: { execute: makePush(13, big.NewInt(13)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH14: operation{ + PUSH14: { execute: makePush(14, big.NewInt(14)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH15: operation{ + PUSH15: { execute: makePush(15, big.NewInt(15)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH16: operation{ + PUSH16: { execute: makePush(16, big.NewInt(16)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH17: operation{ + PUSH17: { execute: makePush(17, big.NewInt(17)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH18: operation{ + PUSH18: { execute: makePush(18, big.NewInt(18)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH19: operation{ + PUSH19: { execute: makePush(19, big.NewInt(19)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH20: operation{ + PUSH20: { execute: makePush(20, big.NewInt(20)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH21: operation{ + PUSH21: { execute: makePush(21, big.NewInt(21)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH22: operation{ + PUSH22: { execute: makePush(22, big.NewInt(22)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH23: operation{ + PUSH23: { execute: makePush(23, big.NewInt(23)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH24: operation{ + PUSH24: { execute: makePush(24, big.NewInt(24)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH25: operation{ + PUSH25: { execute: makePush(25, big.NewInt(25)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH26: operation{ + PUSH26: { execute: makePush(26, big.NewInt(26)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH27: operation{ + PUSH27: { execute: makePush(27, big.NewInt(27)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH28: operation{ + PUSH28: { execute: makePush(28, big.NewInt(28)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: 
true, }, - PUSH29: operation{ + PUSH29: { execute: makePush(29, big.NewInt(29)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH30: operation{ + PUSH30: { execute: makePush(30, big.NewInt(30)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH31: operation{ + PUSH31: { execute: makePush(31, big.NewInt(31)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - PUSH32: operation{ + PUSH32: { execute: makePush(32, big.NewInt(32)), gasCost: gasPush, validateStack: makeStackFunc(0, 1), valid: true, }, - DUP1: operation{ + DUP1: { execute: makeDup(1), gasCost: gasDup, validateStack: makeStackFunc(1, 1), valid: true, }, - DUP2: operation{ + DUP2: { execute: makeDup(2), gasCost: gasDup, validateStack: makeStackFunc(2, 1), valid: true, }, - DUP3: operation{ + DUP3: { execute: makeDup(3), gasCost: gasDup, validateStack: makeStackFunc(3, 1), valid: true, }, - DUP4: operation{ + DUP4: { execute: makeDup(4), gasCost: gasDup, validateStack: makeStackFunc(4, 1), valid: true, }, - DUP5: operation{ + DUP5: { execute: makeDup(5), gasCost: gasDup, validateStack: makeStackFunc(5, 1), valid: true, }, - DUP6: operation{ + DUP6: { execute: makeDup(6), gasCost: gasDup, validateStack: makeStackFunc(6, 1), valid: true, }, - DUP7: operation{ + DUP7: { execute: makeDup(7), gasCost: gasDup, validateStack: makeStackFunc(7, 1), valid: true, }, - DUP8: operation{ + DUP8: { execute: makeDup(8), gasCost: gasDup, validateStack: makeStackFunc(8, 1), valid: true, }, - DUP9: operation{ + DUP9: { execute: makeDup(9), gasCost: gasDup, validateStack: makeStackFunc(9, 1), valid: true, }, - DUP10: operation{ + DUP10: { execute: makeDup(10), gasCost: gasDup, validateStack: makeStackFunc(10, 1), valid: true, }, - DUP11: operation{ + DUP11: { execute: makeDup(11), gasCost: gasDup, validateStack: makeStackFunc(11, 1), valid: true, }, - DUP12: operation{ + DUP12: { execute: makeDup(12), gasCost: gasDup, validateStack: makeStackFunc(12, 1), valid: true, }, - DUP13: operation{ + DUP13: { execute: makeDup(13), gasCost: gasDup, validateStack: makeStackFunc(13, 1), valid: true, }, - DUP14: operation{ + DUP14: { execute: makeDup(14), gasCost: gasDup, validateStack: makeStackFunc(14, 1), valid: true, }, - DUP15: operation{ + DUP15: { execute: makeDup(15), gasCost: gasDup, validateStack: makeStackFunc(15, 1), valid: true, }, - DUP16: operation{ + DUP16: { execute: makeDup(16), gasCost: gasDup, validateStack: makeStackFunc(16, 1), diff --git a/crypto/ecies/asn1.go b/crypto/ecies/asn1.go index 40dabd329b..508a645cdf 100644 --- a/crypto/ecies/asn1.go +++ b/crypto/ecies/asn1.go @@ -109,7 +109,7 @@ func (curve secgNamedCurve) Equal(curve2 secgNamedCurve) bool { if len(curve) != len(curve2) { return false } - for i, _ := range curve { + for i := range curve { if curve[i] != curve2[i] { return false } @@ -157,7 +157,7 @@ func (a asnAlgorithmIdentifier) Cmp(b asnAlgorithmIdentifier) bool { if len(a.Algorithm) != len(b.Algorithm) { return false } - for i, _ := range a.Algorithm { + for i := range a.Algorithm { if a.Algorithm[i] != b.Algorithm[i] { return false } @@ -306,7 +306,7 @@ func (a asnECDHAlgorithm) Cmp(b asnECDHAlgorithm) bool { if len(a.Algorithm) != len(b.Algorithm) { return false } - for i, _ := range a.Algorithm { + for i := range a.Algorithm { if a.Algorithm[i] != b.Algorithm[i] { return false } @@ -325,7 +325,7 @@ func (a asnKeyDerivationFunction) Cmp(b asnKeyDerivationFunction) bool { if len(a.Algorithm) != len(b.Algorithm) { return false } - for i, _ := range 
a.Algorithm { + for i := range a.Algorithm { if a.Algorithm[i] != b.Algorithm[i] { return false } @@ -360,7 +360,7 @@ func (a asnSymmetricEncryption) Cmp(b asnSymmetricEncryption) bool { if len(a.Algorithm) != len(b.Algorithm) { return false } - for i, _ := range a.Algorithm { + for i := range a.Algorithm { if a.Algorithm[i] != b.Algorithm[i] { return false } @@ -380,7 +380,7 @@ func (a asnMessageAuthenticationCode) Cmp(b asnMessageAuthenticationCode) bool { if len(a.Algorithm) != len(b.Algorithm) { return false } - for i, _ := range a.Algorithm { + for i := range a.Algorithm { if a.Algorithm[i] != b.Algorithm[i] { return false } diff --git a/crypto/ecies/ecies_test.go b/crypto/ecies/ecies_test.go index cb09061ced..3b3517baf3 100644 --- a/crypto/ecies/ecies_test.go +++ b/crypto/ecies/ecies_test.go @@ -492,17 +492,17 @@ type testCase struct { } var testCases = []testCase{ - testCase{ + { Curve: elliptic.P256(), Name: "P256", Expected: true, }, - testCase{ + { Curve: elliptic.P384(), Name: "P384", Expected: true, }, - testCase{ + { Curve: elliptic.P521(), Name: "P521", Expected: true, diff --git a/crypto/sha3/sha3_test.go b/crypto/sha3/sha3_test.go index caf72f279f..c433761a8a 100644 --- a/crypto/sha3/sha3_test.go +++ b/crypto/sha3/sha3_test.go @@ -201,7 +201,7 @@ func TestSqueezing(t *testing.T) { d1 := newShakeHash() d1.Write([]byte(testString)) var multiple []byte - for _ = range ref { + for range ref { one := make([]byte, 1) d1.Read(one) multiple = append(multiple, one...) diff --git a/eth/backend_test.go b/eth/backend_test.go index 8d55f30b90..574731fbe5 100644 --- a/eth/backend_test.go +++ b/eth/backend_test.go @@ -37,12 +37,12 @@ func TestMipmapUpgrade(t *testing.T) { switch i { case 1: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = []*types.Log{&types.Log{Address: addr}} + receipt.Logs = []*types.Log{{Address: addr}} gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} case 2: receipt := types.NewReceipt(nil, new(big.Int)) - receipt.Logs = []*types.Log{&types.Log{Address: addr}} + receipt.Logs = []*types.Log{{Address: addr}} gen.AddUncheckedReceipt(receipt) receipts = types.Receipts{receipt} } diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index b0bfc66c82..ea4b6a6f2f 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -205,7 +205,7 @@ func (p *peer) FetchNodeData(request *fetchRequest) error { // Convert the hash set to a retrievable slice hashes := make([]common.Hash, 0, len(request.Hashes)) - for hash, _ := range request.Hashes { + for hash := range request.Hashes { hashes = append(hashes, hash) } go p.getNodeData(hashes) @@ -314,7 +314,7 @@ func (p *peer) MarkLacking(hash common.Hash) { defer p.lock.Unlock() for len(p.lacking) >= maxLackingHashes { - for drop, _ := range p.lacking { + for drop := range p.lacking { delete(p.lacking, drop) break } diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index 060eaf9706..dd9590b287 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -844,7 +844,7 @@ func (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, } } // Remove the expired requests from the pending pool - for id, _ := range expiries { + for id := range expiries { delete(pendPool, id) } return expiries @@ -1063,7 +1063,7 @@ func (q *queue) DeliverNodeData(id string, data [][]byte, callback func(int, boo // If no data was retrieved, mark their hashes as unavailable for the origin peer if len(data) == 0 { - for hash, _ := range request.Hashes { + for hash := range 
request.Hashes { request.Peer.MarkLacking(hash) } } diff --git a/eth/filters/api.go b/eth/filters/api.go index 65d7fd17e2..02a544ce1b 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -505,7 +505,7 @@ func (args *FilterCriteria) UnmarshalJSON(data []byte) error { switch topic := t.(type) { case nil: // ignore topic when matching logs - args.Topics[i] = []common.Hash{common.Hash{}} + args.Topics[i] = []common.Hash{{}} case string: // match specific topic diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index 5f51c0aa8a..cd0745ec13 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -281,15 +281,15 @@ func TestLogFilter(t *testing.T) { // match all 0: {FilterCriteria{}, allLogs, ""}, // match none due to no matching addresses - 1: {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, []*types.Log{}, ""}, + 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{allLogs[0].Topics}}, []*types.Log{}, ""}, // match logs based on addresses, ignore topics 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, // match none due to no matching topics (match with address) - 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, []*types.Log{}, ""}, + 3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, // match logs based on addresses and topics - 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, allLogs[3:5], ""}, + 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, // match logs based on multiple addresses and "or" topics - 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, allLogs[2:5], ""}, + 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, // logs in the pending block 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, // mined logs with block num >= 2 or pending logs @@ -299,9 +299,9 @@ func TestLogFilter(t *testing.T) { // all "mined" logs 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, // all "mined" logs with 1>= block num <=2 and topic secondTopic - 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{[]common.Hash{secondTopic}}}, allLogs[3:4], ""}, + 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, // all "mined" and pending logs with topic firstTopic - 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{[]common.Hash{firstTopic}}}, expectedCase11, ""}, + 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, } ) @@ -402,19 +402,19 @@ func TestPendingLogsSubscription(t *testing.T) { // match all 
{FilterCriteria{}, convertLogs(allLogs), nil, nil}, // match none due to no matching addresses - {FilterCriteria{Addresses: []common.Address{common.Address{}, notUsedAddress}, Topics: [][]common.Hash{[]common.Hash{}}}, []*types.Log{}, nil, nil}, + {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{{}}}, []*types.Log{}, nil, nil}, // match logs based on addresses, ignore topics {FilterCriteria{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, // match none due to no matching topics (match with address) - {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{[]common.Hash{notUsedTopic}}}, []*types.Log{}, nil, nil}, + {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil}, // match logs based on addresses and topics - {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, + {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil}, // match logs based on multiple addresses and "or" topics - {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil}, + {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil}, // block numbers are ignored for filters created with New***Filter, these return all logs that match the given criterias when the state changes {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil}, // multiple pending logs, should match only 2 topics from the logs in block 5 - {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{[]common.Hash{firstTopic, forthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, + {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, forthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil}, } ) diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index 35495e8af2..83ff3e9ce0 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -210,7 +210,7 @@ func TestFilters(t *testing.T) { filter := New(backend, true) filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2, hash3, hash4}}) + filter.SetTopics([][]common.Hash{{hash1, hash2, hash3, hash4}}) filter.SetBeginBlock(0) filter.SetEndBlock(-1) @@ -221,7 +221,7 @@ func TestFilters(t *testing.T) { filter = New(backend, true) filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) + filter.SetTopics([][]common.Hash{{hash3}}) filter.SetBeginBlock(900) filter.SetEndBlock(999) logs, _ = filter.Find(context.Background()) @@ -234,7 +234,7 @@ func TestFilters(t *testing.T) { filter = New(backend, true) filter.SetAddresses([]common.Address{addr}) - filter.SetTopics([][]common.Hash{[]common.Hash{hash3}}) + filter.SetTopics([][]common.Hash{{hash3}}) 
filter.SetBeginBlock(990) filter.SetEndBlock(-1) logs, _ = filter.Find(context.Background()) @@ -246,7 +246,7 @@ func TestFilters(t *testing.T) { } filter = New(backend, true) - filter.SetTopics([][]common.Hash{[]common.Hash{hash1, hash2}}) + filter.SetTopics([][]common.Hash{{hash1, hash2}}) filter.SetBeginBlock(1) filter.SetEndBlock(10) @@ -257,7 +257,7 @@ func TestFilters(t *testing.T) { failHash := common.BytesToHash([]byte("fail")) filter = New(backend, true) - filter.SetTopics([][]common.Hash{[]common.Hash{failHash}}) + filter.SetTopics([][]common.Hash{{failHash}}) filter.SetBeginBlock(0) filter.SetEndBlock(-1) @@ -278,7 +278,7 @@ func TestFilters(t *testing.T) { } filter = New(backend, true) - filter.SetTopics([][]common.Hash{[]common.Hash{failHash}, []common.Hash{hash1}}) + filter.SetTopics([][]common.Hash{{failHash}, {hash1}}) filter.SetBeginBlock(0) filter.SetEndBlock(-1) diff --git a/eth/handler_test.go b/eth/handler_test.go index 6900f78080..4695b3f605 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -75,7 +75,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Create a "random" unknown hash for testing var unknown common.Hash - for i, _ := range unknown { + for i := range unknown { unknown[i] = byte(i) } // Create a batch of tests for various scenarios @@ -246,17 +246,17 @@ func testGetBlockBodies(t *testing.T, protocol int) { {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ - common.Hash{}, + {}, pm.blockchain.GetBlockByNumber(1).Hash(), - common.Hash{}, + {}, pm.blockchain.GetBlockByNumber(10).Hash(), - common.Hash{}, + {}, pm.blockchain.GetBlockByNumber(100).Hash(), - common.Hash{}, + {}, }, []bool{false, true, false, true, false, true, false}, 3}, } // Run each of the tests and verify the results against the chain diff --git a/eth/protocol_test.go b/eth/protocol_test.go index 0aac19f435..43149d0c0e 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -178,7 +178,7 @@ func testSendTransactions(t *testing.T, protocol int) { func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { // Create a "random" hash for testing var hash common.Hash - for i, _ := range hash { + for i := range hash { hash[i] = byte(i) } // Assemble some table driven tests diff --git a/ethdb/memory_database.go b/ethdb/memory_database.go index a729f52332..65c4879347 100644 --- a/ethdb/memory_database.go +++ b/ethdb/memory_database.go @@ -67,7 +67,7 @@ func (db *MemDatabase) Keys() [][]byte { defer db.lock.RUnlock() keys := [][]byte{} - for key, _ := range db.db { + for key := range db.db { keys = append(keys, []byte(key)) } return keys diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index 716beef697..b802d347f6 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -326,7 +326,7 @@ func (s *Service) login(in *json.Decoder, out *json.Encoder) error { Secret: s.pass, } login := map[string][]interface{}{ - "emit": []interface{}{"hello", auth}, + "emit": {"hello", auth}, } if err := out.Encode(login); err != 
nil { return err @@ -365,7 +365,7 @@ func (s *Service) reportLatency(out *json.Encoder) error { start := time.Now() ping := map[string][]interface{}{ - "emit": []interface{}{"node-ping", map[string]string{ + "emit": {"node-ping", map[string]string{ "id": s.node, "clientTime": start.String(), }}, @@ -383,7 +383,7 @@ func (s *Service) reportLatency(out *json.Encoder) error { } // Send back the measured latency latency := map[string][]interface{}{ - "emit": []interface{}{"latency", map[string]string{ + "emit": {"latency", map[string]string{ "id": s.node, "latency": strconv.Itoa(int((time.Since(start) / time.Duration(2)).Nanoseconds() / 1000000)), }}, @@ -438,7 +438,7 @@ func (s *Service) reportBlock(out *json.Encoder, block *types.Block) error { "block": s.assembleBlockStats(block), } report := map[string][]interface{}{ - "emit": []interface{}{"block", stats}, + "emit": {"block", stats}, } if err := out.Encode(report); err != nil { return err @@ -531,7 +531,7 @@ func (s *Service) reportHistory(out *json.Encoder, list []uint64) error { "history": history, } report := map[string][]interface{}{ - "emit": []interface{}{"history", stats}, + "emit": {"history", stats}, } if err := out.Encode(report); err != nil { return err @@ -562,7 +562,7 @@ func (s *Service) reportPending(out *json.Encoder) error { }, } report := map[string][]interface{}{ - "emit": []interface{}{"pending", stats}, + "emit": {"pending", stats}, } if err := out.Encode(report); err != nil { return err @@ -616,7 +616,7 @@ func (s *Service) reportStats(out *json.Encoder) error { }, } report := map[string][]interface{}{ - "emit": []interface{}{"stats", stats}, + "emit": {"stats", stats}, } if err := out.Encode(report); err != nil { return err diff --git a/event/event_test.go b/event/event_test.go index 3940293013..2c56ecf29f 100644 --- a/event/event_test.go +++ b/event/event_test.go @@ -144,7 +144,7 @@ func TestMuxConcurrent(t *testing.T) { func emptySubscriber(mux *TypeMux, types ...interface{}) { s := mux.Subscribe(testEvent(0)) go func() { - for _ = range s.Chan() { + for range s.Chan() { } }() } @@ -187,7 +187,7 @@ func BenchmarkChanSend(b *testing.B) { c := make(chan interface{}) closed := make(chan struct{}) go func() { - for _ = range c { + for range c { } }() diff --git a/event/filter/generic_filter.go b/event/filter/generic_filter.go index 27f35920d4..d679b8bfa8 100644 --- a/event/filter/generic_filter.go +++ b/event/filter/generic_filter.go @@ -34,7 +34,7 @@ func (self Generic) Compare(f Filter) bool { strMatch = false } - for k, _ := range self.Data { + for k := range self.Data { if _, ok := filter.Data[k]; !ok { return false } diff --git a/les/fetcher.go b/les/fetcher.go index c23af8da31..4a0830a8ad 100644 --- a/les/fetcher.go +++ b/les/fetcher.go @@ -664,7 +664,7 @@ func (f *lightFetcher) updateMaxConfirmedTd(td *big.Int) { f.lastUpdateStats.next = newEntry } f.lastUpdateStats = newEntry - for p, _ := range f.peers { + for p := range f.peers { f.checkUpdateStats(p, newEntry) } } diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go index 786884437b..f9f0294665 100644 --- a/les/flowcontrol/manager.go +++ b/les/flowcontrol/manager.go @@ -126,7 +126,7 @@ func (self *ClientManager) removeNode(node *cmNode) { // recalc sumWeight func (self *ClientManager) updateNodes(time int64) (rce bool) { var sumWeight, rcSum uint64 - for node, _ := range self.nodes { + for node := range self.nodes { rc := node.recharging node.update(time) if rc && !node.recharging { @@ -145,13 +145,13 @@ func (self *ClientManager) 
updateNodes(time int64) (rce bool) { func (self *ClientManager) update(time int64) { for { firstTime := time - for node, _ := range self.nodes { + for node := range self.nodes { if node.recharging && node.finishRecharge < firstTime { firstTime = node.finishRecharge } } if self.updateNodes(firstTime) { - for node, _ := range self.nodes { + for node := range self.nodes { if node.recharging { node.set(node.serving, self.simReqCnt, self.sumWeight) } diff --git a/les/handler_test.go b/les/handler_test.go index 37c5dd2268..0b94d0d30b 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -49,7 +49,7 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Create a "random" unknown hash for testing var unknown common.Hash - for i, _ := range unknown { + for i := range unknown { unknown[i] = byte(i) } // Create a batch of tests for various scenarios @@ -189,17 +189,17 @@ func testGetBlockBodies(t *testing.T, protocol int) { //{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned {0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable {0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{common.Hash{}}, []bool{false}, 0}, // A non existent block should not be returned + {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned // Existing and non-existing blocks interleaved should not cause problems {0, []common.Hash{ - common.Hash{}, + {}, bc.GetBlockByNumber(1).Hash(), - common.Hash{}, + {}, bc.GetBlockByNumber(10).Hash(), - common.Hash{}, + {}, bc.GetBlockByNumber(100).Hash(), - common.Hash{}, + {}, }, []bool{false, true, false, true, false, true, false}, 3}, } // Run each of the tests and verify the results against the chain @@ -312,7 +312,7 @@ func testGetProofs(t *testing.T, protocol int) { var proofreqs []ProofReq var proofs [][]rlp.RawValue - accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, common.Address{}} + accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}} for i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ { header := bc.GetHeaderByNumber(i) root := header.Root diff --git a/les/peer.go b/les/peer.go index 0a8db4975f..8d4a83f59f 100644 --- a/les/peer.go +++ b/les/peer.go @@ -467,7 +467,7 @@ func (ps *peerSet) AllPeerIDs() []string { res := make([]string, len(ps.peers)) idx := 0 - for id, _ := range ps.peers { + for id := range ps.peers { res[idx] = id idx++ } diff --git a/les/randselect_test.go b/les/randselect_test.go index f3c34305ec..9ae7726ddd 100644 --- a/les/randselect_test.go +++ b/les/randselect_test.go @@ -39,7 +39,7 @@ func TestWeightedRandomSelect(t *testing.T) { s := newWeightedRandomSelect() w := -1 list := make([]testWrsItem, cnt) - for i, _ := range list { + for i := range list { list[i] = testWrsItem{idx: i, widx: &w} s.update(&list[i]) } diff --git a/les/serverpool.go b/les/serverpool.go index f5e8804604..80c446eef1 100644 --- a/les/serverpool.go +++ b/les/serverpool.go @@ -414,7 +414,7 @@ func (pool *serverPool) loadNodes() { // ordered from least to most recently connected. 
func (pool *serverPool) saveNodes() { list := make([]*poolEntry, len(pool.knownQueue.queue)) - for i, _ := range list { + for i := range list { list[i] = pool.knownQueue.fetchOldest() } enc, err := rlp.EncodeToBytes(list) diff --git a/les/sync.go b/les/sync.go index 72c979c61f..c143cb1453 100644 --- a/les/sync.go +++ b/les/sync.go @@ -43,12 +43,12 @@ func (pm *ProtocolManager) syncer() { for { select { case <-pm.newPeerCh: -/* // Make sure we have peers to select from, then sync - if pm.peers.Len() < minDesiredPeerCount { - break - } - go pm.synchronise(pm.peers.BestPeer()) -*/ + /* // Make sure we have peers to select from, then sync + if pm.peers.Len() < minDesiredPeerCount { + break + } + go pm.synchronise(pm.peers.BestPeer()) + */ /*case <-forceSync: // Force a sync even if not enough peers are present go pm.synchronise(pm.peers.BestPeer()) diff --git a/les/txrelay.go b/les/txrelay.go index 036158f5d2..84d049b45d 100644 --- a/les/txrelay.go +++ b/les/txrelay.go @@ -138,7 +138,7 @@ func (self *LesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback if len(self.txPending) > 0 { txs := make(types.Transactions, len(self.txPending)) i := 0 - for hash, _ := range self.txPending { + for hash := range self.txPending { txs[i] = self.txSent[hash].tx i++ } diff --git a/light/lightchain.go b/light/lightchain.go index d397f50063..0d28ad2f45 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -135,7 +135,7 @@ func NewLightChain(odr OdrBackend, config *params.ChainConfig, pow pow.PoW, mux return nil, err } // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain - for hash, _ := range core.BadHashes { + for hash := range core.BadHashes { if header := bc.GetHeaderByHash(hash); header != nil { glog.V(logger.Error).Infof("Found bad hash, rewinding chain to block #%d [%x…]", header.Number, header.ParentHash[:4]) bc.SetHead(header.Number.Uint64() - 1) diff --git a/light/txpool_test.go b/light/txpool_test.go index 61d7c1c499..6927c54f8a 100644 --- a/light/txpool_test.go +++ b/light/txpool_test.go @@ -73,7 +73,7 @@ func txPoolTestChainGen(i int, block *core.BlockGen) { } func TestTxPool(t *testing.T) { - for i, _ := range testTx { + for i := range testTx { testTx[i], _ = types.SignTx(types.NewTransaction(uint64(i), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) } diff --git a/logger/glog/glog.go b/logger/glog/glog.go index edaa21f075..0b33527c32 100644 --- a/logger/glog/glog.go +++ b/logger/glog/glog.go @@ -928,7 +928,7 @@ const flushInterval = 30 * time.Second // flushDaemon periodically flushes the log file buffers. 
func (l *loggingT) flushDaemon() { - for _ = range time.NewTicker(flushInterval).C { + for range time.NewTicker(flushInterval).C { l.lockAndFlushAll() } } diff --git a/miner/worker.go b/miner/worker.go index adb224c474..9e70c8f046 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -256,7 +256,7 @@ func (self *worker) update() { self.currentMu.Lock() acc, _ := types.Sender(self.current.signer, ev.Tx) - txs := map[common.Address]types.Transactions{acc: types.Transactions{ev.Tx}} + txs := map[common.Address]types.Transactions{acc: {ev.Tx}} txset := types.NewTransactionsByPriceAndNonce(txs) self.current.commitTransactions(self.mux, txset, self.gasPrice, self.chain) diff --git a/mobile/p2p.go b/mobile/p2p.go index e717d4004b..8d21639e55 100644 --- a/mobile/p2p.go +++ b/mobile/p2p.go @@ -38,7 +38,7 @@ func (ni *NodeInfo) GetListenerPort() int { return ni.info.Ports.Listener func (ni *NodeInfo) GetListenerAddress() string { return ni.info.ListenAddr } func (ni *NodeInfo) GetProtocols() *Strings { protos := []string{} - for proto, _ := range ni.info.Protocols { + for proto := range ni.info.Protocols { protos = append(protos, proto) } return &Strings{protos} diff --git a/node/node_test.go b/node/node_test.go index d9b26453bf..6b2b62d73b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -166,7 +166,7 @@ func TestServiceLifeCycle(t *testing.T) { if err := stack.Start(); err != nil { t.Fatalf("failed to start protocol stack: %v", err) } - for id, _ := range services { + for id := range services { if !started[id] { t.Fatalf("service %s: freshly started service not running", id) } @@ -178,7 +178,7 @@ func TestServiceLifeCycle(t *testing.T) { if err := stack.Stop(); err != nil { t.Fatalf("failed to stop protocol stack: %v", err) } - for id, _ := range services { + for id := range services { if !stopped[id] { t.Fatalf("service %s: freshly terminated service still running", id) } @@ -270,7 +270,7 @@ func TestServiceConstructionAbortion(t *testing.T) { if err := stack.Start(); err != failure { t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure) } - for id, _ := range services { + for id := range services { if started[id] { t.Fatalf("service %s: started should not have", id) } @@ -322,7 +322,7 @@ func TestServiceStartupAbortion(t *testing.T) { if err := stack.Start(); err != failure { t.Fatalf("iter %d: stack startup failure mismatch: have %v, want %v", i, err, failure) } - for id, _ := range services { + for id := range services { if started[id] && !stopped[id] { t.Fatalf("service %s: started but not stopped", id) } @@ -376,7 +376,7 @@ func TestServiceTerminationGuarantee(t *testing.T) { if err := stack.Start(); err != nil { t.Fatalf("iter %d: failed to start protocol stack: %v", i, err) } - for id, _ := range services { + for id := range services { if !started[id] { t.Fatalf("iter %d, service %s: service not running", i, id) } @@ -397,7 +397,7 @@ func TestServiceTerminationGuarantee(t *testing.T) { t.Fatalf("iter %d: failure count mismatch: have %d, want %d", i, len(err.Services), 1) } } - for id, _ := range services { + for id := range services { if !stopped[id] { t.Fatalf("iter %d, service %s: service not terminated", i, id) } diff --git a/p2p/discover/database_test.go b/p2p/discover/database_test.go index 5a729f02b9..be972fd2c3 100644 --- a/p2p/discover/database_test.go +++ b/p2p/discover/database_test.go @@ -242,12 +242,12 @@ func TestNodeDBSeedQuery(t *testing.T) { if len(seeds) != len(want) { t.Errorf("seed count mismatch: have %v, want %v", len(seeds), 
len(want)) } - for id, _ := range have { + for id := range have { if _, ok := want[id]; !ok { t.Errorf("extra seed: %v", id) } } - for id, _ := range want { + for id := range want { if _, ok := have[id]; !ok { t.Errorf("missing seed: %v", id) } diff --git a/p2p/discover/table.go b/p2p/discover/table.go index ad0b5c8ca3..839e3ec7e7 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -433,7 +433,7 @@ func (tab *Table) bondall(nodes []*Node) (result []*Node) { rc <- nn }(nodes[i]) } - for _ = range nodes { + for range nodes { if n := <-rc; n != nil { result = append(result, n) } diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go index 102c7c2d13..1037cc6099 100644 --- a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -314,19 +314,19 @@ var lookupTestnet = &preminedTestnet{ target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"), targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61}, dists: [257][]NodeID{ - 240: []NodeID{ + 240: { MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"), MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"), }, - 244: []NodeID{ + 244: { MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"), }, - 246: []NodeID{ + 246: { MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"), MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"), MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"), }, - 247: []NodeID{ + 247: { MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"), MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"), MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"), @@ -338,7 +338,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"), MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"), }, - 248: []NodeID{ + 248: { MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"), MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"), MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"), @@ -356,7 +356,7 @@ var lookupTestnet = &preminedTestnet{ 
MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"), MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"), }, - 249: []NodeID{ + 249: { MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"), MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"), MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"), @@ -374,7 +374,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"), MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"), }, - 250: []NodeID{ + 250: { MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"), MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"), MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"), @@ -392,7 +392,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"), MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"), }, - 251: []NodeID{ + 251: { MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"), MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"), MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"), @@ -410,7 +410,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"), MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"), }, - 252: []NodeID{ + 252: { MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"), MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"), MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"), @@ -428,7 +428,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"), MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"), }, - 253: []NodeID{ + 253: { 
MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"), MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"), MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"), @@ -446,7 +446,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"), MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"), }, - 254: []NodeID{ + 254: { MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"), MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"), MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"), @@ -464,7 +464,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"), MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"), }, - 255: []NodeID{ + 255: { MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"), MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"), MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"), @@ -482,7 +482,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"), MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"), }, - 256: []NodeID{ + 256: { MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"), MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"), MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"), diff --git a/p2p/discv5/database_test.go b/p2p/discv5/database_test.go index 4d3330ed22..a2ccb64671 100644 --- a/p2p/discv5/database_test.go +++ b/p2p/discv5/database_test.go @@ -242,12 +242,12 @@ func TestNodeDBSeedQuery(t *testing.T) { if len(seeds) != len(want) { t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want)) } - for id, _ := range have { + for id := range have { if _, ok := want[id]; !ok { t.Errorf("extra seed: %v", id) } } - for id, _ := range want { + for id := range want { if _, ok := have[id]; !ok { t.Errorf("missing seed: %v", id) } diff --git a/p2p/discv5/net_test.go b/p2p/discv5/net_test.go index 327457c7c8..bd234f5ba6 100644 --- a/p2p/discv5/net_test.go +++ 
b/p2p/discv5/net_test.go @@ -69,19 +69,19 @@ var lookupTestnet = &preminedTestnet{ target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"), targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61}, dists: [257][]NodeID{ - 240: []NodeID{ + 240: { MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"), MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"), }, - 244: []NodeID{ + 244: { MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"), }, - 246: []NodeID{ + 246: { MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"), MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"), MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"), }, - 247: []NodeID{ + 247: { MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"), MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"), MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"), @@ -93,7 +93,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"), MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"), }, - 248: []NodeID{ + 248: { MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"), MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"), MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"), @@ -111,7 +111,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"), MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"), }, - 249: []NodeID{ + 249: { MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"), MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"), MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"), @@ -129,7 +129,7 @@ var lookupTestnet = &preminedTestnet{ 
MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"), MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"), }, - 250: []NodeID{ + 250: { MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"), MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"), MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"), @@ -147,7 +147,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"), MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"), }, - 251: []NodeID{ + 251: { MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"), MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"), MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"), @@ -165,7 +165,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"), MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"), }, - 252: []NodeID{ + 252: { MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"), MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"), MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"), @@ -183,7 +183,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"), MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"), }, - 253: []NodeID{ + 253: { MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"), MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"), MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"), @@ -201,7 +201,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"), MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"), }, - 254: []NodeID{ + 254: { 
MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"), MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"), MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"), @@ -219,7 +219,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"), MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"), }, - 255: []NodeID{ + 255: { MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"), MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"), MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"), @@ -237,7 +237,7 @@ var lookupTestnet = &preminedTestnet{ MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"), MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"), }, - 256: []NodeID{ + 256: { MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"), MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"), MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"), diff --git a/p2p/discv5/sim_test.go b/p2p/discv5/sim_test.go index cb64d7fa0c..3f7fe7463b 100644 --- a/p2p/discv5/sim_test.go +++ b/p2p/discv5/sim_test.go @@ -74,7 +74,7 @@ func TestSimTopics(t *testing.T) { go func() { nets := make([]*Network, 1024) - for i, _ := range nets { + for i := range nets { net := sim.launchNode(false) nets[i] = net if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { @@ -147,7 +147,7 @@ func TestSimTopics(t *testing.T) { func testHierarchicalTopics(i int) []Topic { digits := strconv.FormatInt(int64(128+i/8), 2) res := make([]Topic, 8) - for i, _ := range res { + for i := range res { res[i] = Topic("foo" + digits[1:i+1]) } return res @@ -167,7 +167,7 @@ func TestSimTopicHierarchy(t *testing.T) { go func() { nets := make([]*Network, 1024) - for i, _ := range nets { + for i := range nets { net := sim.launchNode(false) nets[i] = net if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { diff --git a/p2p/discv5/ticket.go b/p2p/discv5/ticket.go index 752fdc9b41..48dd114f06 100644 --- a/p2p/discv5/ticket.go +++ b/p2p/discv5/ticket.go @@ -831,7 +831,7 @@ func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) { maxValue := float64(0) now := mclock.Now() v := float64(0) - for i, _ := range r.buckets { + for i := range r.buckets { r.buckets[i].update(now) v += r.buckets[i].weights[trOutside] - r.buckets[i].weights[trInside] r.buckets[i].value = v diff --git a/p2p/discv5/topic.go b/p2p/discv5/topic.go index 625921e84c..b6bea013c7 
100644 --- a/p2p/discv5/topic.go +++ b/p2p/discv5/topic.go @@ -316,7 +316,7 @@ func (t *topicTable) collectGarbage() { t.checkDeleteNode(node) } - for topic, _ := range t.topics { + for topic := range t.topics { t.checkDeleteTopic(topic) } } diff --git a/p2p/nat/natpmp.go b/p2p/nat/natpmp.go index c2f9408914..577a424fbe 100644 --- a/p2p/nat/natpmp.go +++ b/p2p/nat/natpmp.go @@ -82,7 +82,7 @@ func discoverPMP() Interface { // any responses after a very short timeout. timeout := time.NewTimer(1 * time.Second) defer timeout.Stop() - for _ = range gws { + for range gws { select { case c := <-found: if c != nil { diff --git a/p2p/peer_test.go b/p2p/peer_test.go index 6f96a823b4..f44300b15c 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -299,7 +299,7 @@ func TestMatchProtocols(t *testing.T) { } } // Make sure no protocols missed negotiation - for name, _ := range tt.Match { + for name := range tt.Match { if _, ok := result[name]; !ok { t.Errorf("test %d, proto '%s': not negotiated, should have", i, name) continue diff --git a/rpc/json.go b/rpc/json.go index ac5a4acd32..61a4ddf432 100644 --- a/rpc/json.go +++ b/rpc/json.go @@ -166,7 +166,7 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) { // subscribe are special, they will always use `subscribeMethod` as first param in the payload if in.Method == subscribeMethod { - reqs := []rpcRequest{rpcRequest{id: &in.Id, isPubSub: true}} + reqs := []rpcRequest{{id: &in.Id, isPubSub: true}} if len(in.Payload) > 0 { // first param must be subscription name var subscribeMethod [1]string @@ -184,7 +184,7 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) { } if in.Method == unsubscribeMethod { - return []rpcRequest{rpcRequest{id: &in.Id, isPubSub: true, + return []rpcRequest{{id: &in.Id, isPubSub: true, method: unsubscribeMethod, params: in.Payload}}, false, nil } @@ -195,10 +195,10 @@ func parseRequest(incomingMsg json.RawMessage) ([]rpcRequest, bool, Error) { // regular RPC call if len(in.Payload) == 0 { - return []rpcRequest{rpcRequest{service: elems[0], method: elems[1], id: &in.Id}}, false, nil + return []rpcRequest{{service: elems[0], method: elems[1], id: &in.Id}}, false, nil } - return []rpcRequest{rpcRequest{service: elems[0], method: elems[1], id: &in.Id, params: in.Payload}}, false, nil + return []rpcRequest{{service: elems[0], method: elems[1], id: &in.Id, params: in.Payload}}, false, nil } // parseBatchRequest will parse a batch request into a collection of requests from the given RawMessage, an indication diff --git a/swarm/storage/dpa_test.go b/swarm/storage/dpa_test.go index 1cde1c00e7..a682324072 100644 --- a/swarm/storage/dpa_test.go +++ b/swarm/storage/dpa_test.go @@ -67,7 +67,7 @@ func TestDPArandom(t *testing.T) { ioutil.WriteFile("/tmp/result.bzz.16M", resultSlice, 0666) localStore.memStore = NewMemStore(dbStore, defaultCacheCapacity) resultReader = dpa.Retrieve(key) - for i, _ := range resultSlice { + for i := range resultSlice { resultSlice[i] = 0 } n, err = resultReader.ReadAt(resultSlice, 0) @@ -128,7 +128,7 @@ func TestDPA_capacity(t *testing.T) { dpa.ChunkStore = localStore // localStore.dbStore.setCapacity(0) resultReader = dpa.Retrieve(key) - for i, _ := range resultSlice { + for i := range resultSlice { resultSlice[i] = 0 } n, err = resultReader.ReadAt(resultSlice, 0) diff --git a/trie/encoding.go b/trie/encoding.go index 761bad1889..2037118ddf 100644 --- a/trie/encoding.go +++ b/trie/encoding.go @@ -80,7 +80,7 @@ func compactHexEncode(nibbles []byte) []byte { } l 
:= (nl + 1) / 2 var str = make([]byte, l) - for i, _ := range str { + for i := range str { b := nibbles[i*2] * 16 if nl > i*2 { b += nibbles[i*2+1] diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 2bcc3700e6..c56ac85be5 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -105,7 +105,7 @@ func TestNodeIteratorCoverage(t *testing.T) { } } // Cross check the hashes and the database itself - for hash, _ := range hashes { + for hash := range hashes { if _, err := db.Get(hash.Bytes()); err != nil { t.Errorf("failed to retrieve reported node %x: %v", hash, err) } diff --git a/trie/sync_test.go b/trie/sync_test.go index 5edbd0746d..6405a51c32 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -182,7 +182,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) { for len(queue) > 0 { // Fetch all the queued nodes in a random order results := make([]SyncResult, 0, len(queue)) - for hash, _ := range queue { + for hash := range queue { data, err := srcDb.Get(hash.Bytes()) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", hash, err) @@ -219,7 +219,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) { for len(queue) > 0 { // Sync only half of the scheduled nodes, even those in random order results := make([]SyncResult, 0, len(queue)/2+1) - for hash, _ := range queue { + for hash := range queue { data, err := srcDb.Get(hash.Bytes()) if err != nil { t.Fatalf("failed to retrieve node data for %x: %v", hash, err) diff --git a/whisper/whisperv2/filter.go b/whisper/whisperv2/filter.go index 8ce4a54fb4..ed7dcd6aed 100644 --- a/whisper/whisperv2/filter.go +++ b/whisper/whisperv2/filter.go @@ -116,7 +116,7 @@ func (self filterer) Compare(f filter.Filter) bool { topics := make([]Topic, len(filter.matcher.conditions)) for i, group := range filter.matcher.conditions { // Message should contain a single topic entry, extract - for topics[i], _ = range group { + for topics[i] = range group { break } } diff --git a/whisper/whisperv2/peer.go b/whisper/whisperv2/peer.go index 404ebd513e..f09ce3523a 100644 --- a/whisper/whisperv2/peer.go +++ b/whisper/whisperv2/peer.go @@ -149,7 +149,7 @@ func (self *peer) expire() { return true }) // Dump all known but unavailable - for hash, _ := range unmark { + for hash := range unmark { self.known.Remove(hash) } } diff --git a/whisper/whisperv2/peer_test.go b/whisper/whisperv2/peer_test.go index 9755e134c4..87ca5063df 100644 --- a/whisper/whisperv2/peer_test.go +++ b/whisper/whisperv2/peer_test.go @@ -221,7 +221,7 @@ func TestPeerMessageExpiration(t *testing.T) { t.Fatalf("peer pool size mismatch: have %v, want %v", peers, 1) } var peer *peer - for peer, _ = range tester.client.peers { + for peer = range tester.client.peers { break } tester.client.peerMu.RUnlock() diff --git a/whisper/whisperv2/topic_test.go b/whisper/whisperv2/topic_test.go index efd4a2c61b..66c84ba352 100644 --- a/whisper/whisperv2/topic_test.go +++ b/whisper/whisperv2/topic_test.go @@ -73,30 +73,30 @@ var topicMatcherCreationTest = struct { matcher []map[[4]byte]struct{} }{ binary: [][][]byte{ - [][]byte{}, - [][]byte{ + {}, + { []byte("Topic A"), }, - [][]byte{ + { []byte("Topic B1"), []byte("Topic B2"), []byte("Topic B3"), }, }, textual: [][]string{ - []string{}, - []string{"Topic A"}, - []string{"Topic B1", "Topic B2", "Topic B3"}, + {}, + {"Topic A"}, + {"Topic B1", "Topic B2", "Topic B3"}, }, matcher: []map[[4]byte]struct{}{ - map[[4]byte]struct{}{}, - map[[4]byte]struct{}{ - [4]byte{0x25, 0xfc, 0x95, 0x66}: struct{}{}, + {}, + { + {0x25, 
0xfc, 0x95, 0x66}: {}, }, - map[[4]byte]struct{}{ - [4]byte{0x93, 0x6d, 0xec, 0x09}: struct{}{}, - [4]byte{0x25, 0x23, 0x34, 0xd3}: struct{}{}, - [4]byte{0x6b, 0xc2, 0x73, 0xd1}: struct{}{}, + { + {0x93, 0x6d, 0xec, 0x09}: {}, + {0x25, 0x23, 0x34, 0xd3}: {}, + {0x6b, 0xc2, 0x73, 0xd1}: {}, }, }, } @@ -106,14 +106,14 @@ func TestTopicMatcherCreation(t *testing.T) { matcher := newTopicMatcherFromBinary(test.binary...) for i, cond := range matcher.conditions { - for topic, _ := range cond { + for topic := range cond { if _, ok := test.matcher[i][topic]; !ok { t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:]) } } } for i, cond := range test.matcher { - for topic, _ := range cond { + for topic := range cond { if _, ok := matcher.conditions[i][topic]; !ok { t.Errorf("condition %d; topic not found: 0x%x", i, topic[:]) } @@ -122,14 +122,14 @@ func TestTopicMatcherCreation(t *testing.T) { matcher = newTopicMatcherFromStrings(test.textual...) for i, cond := range matcher.conditions { - for topic, _ := range cond { + for topic := range cond { if _, ok := test.matcher[i][topic]; !ok { t.Errorf("condition %d; extra topic found: 0x%x", i, topic[:]) } } } for i, cond := range test.matcher { - for topic, _ := range cond { + for topic := range cond { if _, ok := matcher.conditions[i][topic]; !ok { t.Errorf("condition %d; topic not found: 0x%x", i, topic[:]) } @@ -155,49 +155,49 @@ var topicMatcherTests = []struct { }, // Fixed topic matcher should match strictly, but only prefix { - filter: [][]string{[]string{"a"}, []string{"b"}}, + filter: [][]string{{"a"}, {"b"}}, topics: []string{"a"}, match: false, }, { - filter: [][]string{[]string{"a"}, []string{"b"}}, + filter: [][]string{{"a"}, {"b"}}, topics: []string{"a", "b"}, match: true, }, { - filter: [][]string{[]string{"a"}, []string{"b"}}, + filter: [][]string{{"a"}, {"b"}}, topics: []string{"a", "b", "c"}, match: true, }, // Multi-matcher should match any from a sub-group { - filter: [][]string{[]string{"a1", "a2"}}, + filter: [][]string{{"a1", "a2"}}, topics: []string{"a"}, match: false, }, { - filter: [][]string{[]string{"a1", "a2"}}, + filter: [][]string{{"a1", "a2"}}, topics: []string{"a1"}, match: true, }, { - filter: [][]string{[]string{"a1", "a2"}}, + filter: [][]string{{"a1", "a2"}}, topics: []string{"a2"}, match: true, }, // Wild-card condition should match anything { - filter: [][]string{[]string{}, []string{"b"}}, + filter: [][]string{{}, {"b"}}, topics: []string{"a"}, match: false, }, { - filter: [][]string{[]string{}, []string{"b"}}, + filter: [][]string{{}, {"b"}}, topics: []string{"a", "b"}, match: true, }, { - filter: [][]string{[]string{}, []string{"b"}}, + filter: [][]string{{}, {"b"}}, topics: []string{"b", "b"}, match: true, }, diff --git a/whisper/whisperv5/peer.go b/whisper/whisperv5/peer.go index 4273cfce1f..6340455045 100644 --- a/whisper/whisperv5/peer.go +++ b/whisper/whisperv5/peer.go @@ -148,7 +148,7 @@ func (peer *Peer) expire() { return true }) // Dump all known but unavailable - for hash, _ := range unmark { + for hash := range unmark { peer.known.Remove(hash) } } diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go index 789adbdb3e..b514c022e5 100644 --- a/whisper/whisperv5/whisper.go +++ b/whisper/whisperv5/whisper.go @@ -105,7 +105,7 @@ func (w *Whisper) Version() uint { func (w *Whisper) getPeer(peerID []byte) (*Peer, error) { w.peerMu.Lock() defer w.peerMu.Unlock() - for p, _ := range w.peers { + for p := range w.peers { id := p.peer.ID() if bytes.Equal(peerID, id[:]) { return p, nil 
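
Note: the hunks above are mechanical rewrites rather than behavioural changes. Two patterns recur throughout: composite-literal simplification (the element or key type is dropped when the enclosing literal already implies it) and range simplification (unused loop variables are dropped instead of being assigned to the blank identifier). The standalone sketch below reproduces both patterns outside the patch; it is illustrative only, and the type and variable names (nodeID, ids, seen) are hypothetical and do not appear in the diffs.

// Illustrative sketch only, not part of the patch series: a tiny program
// showing the two simplifications applied in the hunks above.
// The identifiers nodeID, ids and seen are hypothetical.
package main

import "fmt"

type nodeID [4]byte

func main() {
	// Composite-literal simplification: the element type is elided because
	// the slice type already implies it.
	// Before: ids := []nodeID{nodeID{1}, nodeID{2}}
	ids := []nodeID{{1}, {2}}

	// Range simplification over a slice: drop the unused value variable.
	// Before: for i, _ := range ids { ... }
	for i := range ids {
		fmt.Printf("id %d: %x\n", i, ids[i])
	}

	// Range simplification over a map, with key and value literal types
	// elided in the map literal as well.
	// Before: seen := map[nodeID]struct{}{nodeID{1}: struct{}{}}
	//         for id, _ := range seen { ... }
	seen := map[nodeID]struct{}{{1}: {}, {2}: {}}
	for id := range seen {
		fmt.Printf("seen: %x\n", id)
	}
}

Running gofmt -s over a tree produces exactly these rewrites, and a linter such as honnef.co/go/simple/cmd/gosimple (used for the second patch below) flags the remaining redundancies, for example bytes.Compare(a, b) == 0 in place of bytes.Equal(a, b).
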
From f2da6581ba827a2aab091f764ace8017b26450d8 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 6 Jan 2017 16:44:20 +0100 Subject: [PATCH 2/3] all: fix issues reported by honnef.co/go/simple/cmd/gosimple --- accounts/abi/bind/bind.go | 20 ++++++++++---------- accounts/abi/type.go | 2 +- cmd/ethtest/main.go | 7 +------ cmd/geth/accountcmd_test.go | 8 ++++---- cmd/utils/cmd.go | 2 +- common/big_test.go | 6 +++--- common/bytes_test.go | 4 ++-- common/compiler/solidity.go | 2 +- common/format.go | 2 +- compression/rle/read_write.go | 4 ++-- console/console.go | 11 ++++------- core/blockchain.go | 5 +---- core/dao.go | 4 ++-- core/database_util_test.go | 4 ++-- core/state/iterator.go | 2 +- core/state/sync_test.go | 4 ++-- core/types/bloom9.go | 8 +------- crypto/crypto_test.go | 2 +- crypto/ecies/ecies.go | 5 ++--- eth/handler.go | 30 ++++-------------------------- ethstats/ethstats.go | 29 ++++++----------------------- miner/worker.go | 2 +- mobile/bind.go | 29 +++++------------------------ node/config_test.go | 2 +- node/node_test.go | 4 ++-- p2p/discover/database.go | 2 +- p2p/discover/node.go | 5 +---- p2p/discv5/database.go | 2 +- p2p/discv5/node.go | 5 +---- swarm/storage/dbstore.go | 2 +- swarm/storage/types.go | 2 +- tests/block_test_util.go | 4 +--- tests/init.go | 6 +----- tests/state_test_util.go | 2 +- tests/vm_test_util.go | 2 +- trie/sync_test.go | 2 +- whisper/shhapi/api.go | 6 +----- whisper/shhapi/api_test.go | 2 +- whisper/whisperv2/envelope_test.go | 12 ++++++------ whisper/whisperv2/filter.go | 5 +---- whisper/whisperv2/filter_test.go | 8 ++++---- whisper/whisperv2/message_test.go | 4 ++-- whisper/whisperv2/topic_test.go | 8 ++++---- whisper/whisperv5/message_test.go | 8 ++++---- whisper/whisperv5/peer_test.go | 2 +- whisper/whisperv5/whisper_test.go | 6 +++--- 46 files changed, 99 insertions(+), 194 deletions(-) diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index 84cf22e3c3..73e95e02a1 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -147,21 +147,21 @@ func bindTypeGo(kind abi.Type) string { switch { case strings.HasPrefix(stringKind, "address"): - parts := regexp.MustCompile("address(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 2 { return stringKind } return fmt.Sprintf("%scommon.Address", parts[1]) case strings.HasPrefix(stringKind, "bytes"): - parts := regexp.MustCompile("bytes([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 3 { return stringKind } return fmt.Sprintf("%s[%s]byte", parts[2], parts[1]) case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"): - parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 4 { return stringKind } @@ -172,7 +172,7 @@ func bindTypeGo(kind abi.Type) string { return fmt.Sprintf("%s*big.Int", parts[3]) case strings.HasPrefix(stringKind, "bool") || strings.HasPrefix(stringKind, "string"): - parts := regexp.MustCompile("([a-z]+)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`([a-z]+)(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 3 { return stringKind } @@ -191,7 +191,7 @@ func bindTypeJava(kind abi.Type) string { switch { case 
strings.HasPrefix(stringKind, "address"): - parts := regexp.MustCompile("address(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 2 { return stringKind } @@ -201,7 +201,7 @@ func bindTypeJava(kind abi.Type) string { return fmt.Sprintf("Addresses") case strings.HasPrefix(stringKind, "bytes"): - parts := regexp.MustCompile("bytes([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 3 { return stringKind } @@ -211,7 +211,7 @@ func bindTypeJava(kind abi.Type) string { return "byte[]" case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"): - parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 4 { return stringKind } @@ -230,7 +230,7 @@ func bindTypeJava(kind abi.Type) string { return fmt.Sprintf("BigInts") case strings.HasPrefix(stringKind, "bool"): - parts := regexp.MustCompile("bool(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`bool(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 2 { return stringKind } @@ -240,7 +240,7 @@ func bindTypeJava(kind abi.Type) string { return fmt.Sprintf("bool[]") case strings.HasPrefix(stringKind, "string"): - parts := regexp.MustCompile("string(\\[[0-9]*\\])?").FindStringSubmatch(stringKind) + parts := regexp.MustCompile(`string(\[[0-9]*\])?`).FindStringSubmatch(stringKind) if len(parts) != 2 { return stringKind } @@ -278,7 +278,7 @@ func namedTypeJava(javaKind string, solKind abi.Type) string { case "bool[]": return "Bools" case "BigInt": - parts := regexp.MustCompile("(u)?int([0-9]*)(\\[[0-9]*\\])?").FindStringSubmatch(solKind.String()) + parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String()) if len(parts) != 4 { return javaKind } diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 7af7ff386f..2bd341bd25 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -65,7 +65,7 @@ var ( // string int uint fixed // string32 int8 uint8 uint[] // address int256 uint256 fixed128x128[2] - fullTypeRegex = regexp.MustCompile("([a-zA-Z0-9]+)(\\[([0-9]*)\\])?") + fullTypeRegex = regexp.MustCompile(`([a-zA-Z0-9]+)(\[([0-9]*)\])?`) // typeRegex parses the abi sub types typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?") ) diff --git a/cmd/ethtest/main.go b/cmd/ethtest/main.go index 7ce663dc03..14b8395798 100644 --- a/cmd/ethtest/main.go +++ b/cmd/ethtest/main.go @@ -88,12 +88,7 @@ func runTestWithReader(test string, r io.Reader) error { default: err = fmt.Errorf("Invalid test type specified: %v", test) } - - if err != nil { - return err - } - - return nil + return err } func getFiles(path string) ([]string, error) { diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go index b6abde6d89..113df983e2 100644 --- a/cmd/geth/accountcmd_test.go +++ b/cmd/geth/accountcmd_test.go @@ -148,7 +148,7 @@ Passphrase: {{.InputLine "foobar"}} "Unlocked account f466859ead1932d743d622cb74fc058882e8648a", } for _, m := range wantMessages { - if strings.Index(geth.stderrText(), m) == -1 { + if !strings.Contains(geth.stderrText(), m) { t.Errorf("stderr text does not contain %q", m) } } @@ -193,7 +193,7 @@ Passphrase: {{.InputLine "foobar"}} "Unlocked account 
289d485d9771714cce91d3393d764e1311907acc", } for _, m := range wantMessages { - if strings.Index(geth.stderrText(), m) == -1 { + if !strings.Contains(geth.stderrText(), m) { t.Errorf("stderr text does not contain %q", m) } } @@ -212,7 +212,7 @@ func TestUnlockFlagPasswordFile(t *testing.T) { "Unlocked account 289d485d9771714cce91d3393d764e1311907acc", } for _, m := range wantMessages { - if strings.Index(geth.stderrText(), m) == -1 { + if !strings.Contains(geth.stderrText(), m) { t.Errorf("stderr text does not contain %q", m) } } @@ -260,7 +260,7 @@ In order to avoid this warning, you need to remove the following duplicate key f "Unlocked account f466859ead1932d743d622cb74fc058882e8648a", } for _, m := range wantMessages { - if strings.Index(geth.stderrText(), m) == -1 { + if !strings.Contains(geth.stderrText(), m) { t.Errorf("stderr text does not contain %q", m) } } diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index a56507e4d7..287efc9c83 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -95,7 +95,7 @@ func StartNode(stack *node.Node) { func FormatTransactionData(data string) []byte { d := common.StringToByteFunc(data, func(s string) (ret []byte) { - slice := regexp.MustCompile("\\n|\\s").Split(s, 1000000000) + slice := regexp.MustCompile(`\n|\s`).Split(s, 1000000000) for _, dataItem := range slice { d := common.FormatData(dataItem) ret = append(ret, d...) diff --git a/common/big_test.go b/common/big_test.go index 1eb0c0c1fd..4d04a8db36 100644 --- a/common/big_test.go +++ b/common/big_test.go @@ -27,7 +27,7 @@ func TestMisc(t *testing.T) { c := []byte{1, 2, 3, 4} z := BitTest(a, 1) - if z != true { + if !z { t.Error("Expected true got", z) } @@ -79,11 +79,11 @@ func TestBigCopy(t *testing.T) { z := BigToBytes(c, 16) zbytes := []byte{232, 212, 165, 16, 0} - if bytes.Compare(y, ybytes) != 0 { + if !bytes.Equal(y, ybytes) { t.Error("Got", ybytes) } - if bytes.Compare(z, zbytes) != 0 { + if !bytes.Equal(z, zbytes) { t.Error("Got", zbytes) } } diff --git a/common/bytes_test.go b/common/bytes_test.go index 2e52084777..98d402c489 100644 --- a/common/bytes_test.go +++ b/common/bytes_test.go @@ -181,7 +181,7 @@ func TestFromHex(t *testing.T) { input := "0x01" expected := []byte{1} result := FromHex(input) - if bytes.Compare(expected, result) != 0 { + if !bytes.Equal(expected, result) { t.Errorf("Expected % x got % x", expected, result) } } @@ -190,7 +190,7 @@ func TestFromHexOddLength(t *testing.T) { input := "0x1" expected := []byte{1} result := FromHex(input) - if bytes.Compare(expected, result) != 0 { + if !bytes.Equal(expected, result) { t.Errorf("Expected % x got % x", expected, result) } } diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go index b0ca9bd75a..d27bddd9ff 100644 --- a/common/compiler/solidity.go +++ b/common/compiler/solidity.go @@ -32,7 +32,7 @@ import ( ) var ( - versionRegexp = regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+") + versionRegexp = regexp.MustCompile(`[0-9]+\.[0-9]+\.[0-9]+`) solcParams = []string{ "--combined-json", "bin,abi,userdoc,devdoc", "--add-std", // include standard lib contracts diff --git a/common/format.go b/common/format.go index 119637d2e4..fccc299620 100644 --- a/common/format.go +++ b/common/format.go @@ -27,7 +27,7 @@ import ( // the unnecessary precision off from the formatted textual representation. 
type PrettyDuration time.Duration -var prettyDurationRe = regexp.MustCompile("\\.[0-9]+") +var prettyDurationRe = regexp.MustCompile(`\.[0-9]+`) // String implements the Stringer interface, allowing pretty printing of duration // values rounded to three decimals. diff --git a/compression/rle/read_write.go b/compression/rle/read_write.go index 03dffd607d..0e7ad90aec 100644 --- a/compression/rle/read_write.go +++ b/compression/rle/read_write.go @@ -76,9 +76,9 @@ func compressChunk(dat []byte) (ret []byte, n int) { } return []byte{token, byte(j + 2)}, j case len(dat) >= 32: - if dat[0] == empty[0] && bytes.Compare(dat[:32], empty) == 0 { + if dat[0] == empty[0] && bytes.Equal(dat[:32], empty) { return []byte{token, emptyShaToken}, 32 - } else if dat[0] == emptyList[0] && bytes.Compare(dat[:32], emptyList) == 0 { + } else if dat[0] == emptyList[0] && bytes.Equal(dat[:32], emptyList) { return []byte{token, emptyListShaToken}, 32 } fallthrough diff --git a/console/console.go b/console/console.go index 6e3d7e43cb..8865f5e899 100644 --- a/console/console.go +++ b/console/console.go @@ -36,9 +36,9 @@ import ( ) var ( - passwordRegexp = regexp.MustCompile("personal.[nus]") - onlyWhitespace = regexp.MustCompile("^\\s*$") - exit = regexp.MustCompile("^\\s*exit\\s*;*\\s*$") + passwordRegexp = regexp.MustCompile(`personal.[nus]`) + onlyWhitespace = regexp.MustCompile(`^\s*$`) + exit = regexp.MustCompile(`^\s*exit\s*;*\s*$`) ) // HistoryFile is the file within the data directory to store input scrollback. @@ -275,10 +275,7 @@ func (c *Console) Evaluate(statement string) error { fmt.Fprintf(c.printer, "[native] error: %v\n", r) } }() - if err := c.jsre.Evaluate(statement, c.printer); err != nil { - return err - } - return nil + return c.jsre.Evaluate(statement, c.printer) } // Interactive starts an interactive user session, where input is propted from diff --git a/core/blockchain.go b/core/blockchain.go index 3c9e1f7cbc..8c2be72313 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -402,10 +402,7 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) { // Export writes the active chain to the given writer. func (self *BlockChain) Export(w io.Writer) error { - if err := self.ExportN(w, uint64(0), self.currentBlock.NumberU64()); err != nil { - return err - } - return nil + return self.ExportN(w, uint64(0), self.currentBlock.NumberU64()) } // ExportN writes a subset of the active chain to the given writer. 
diff --git a/core/dao.go b/core/dao.go index 1260c310ae..a7f544c3d9 100644 --- a/core/dao.go +++ b/core/dao.go @@ -45,11 +45,11 @@ func ValidateDAOHeaderExtraData(config *params.ChainConfig, header *types.Header } // Depending whether we support or oppose the fork, validate the extra-data contents if config.DAOForkSupport { - if bytes.Compare(header.Extra, params.DAOForkBlockExtra) != 0 { + if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) { return ValidationError("DAO pro-fork bad block extra-data: 0x%x", header.Extra) } } else { - if bytes.Compare(header.Extra, params.DAOForkBlockExtra) == 0 { + if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { return ValidationError("DAO no-fork bad block extra-data: 0x%x", header.Extra) } } diff --git a/core/database_util_test.go b/core/database_util_test.go index c8fd857eac..d96aa71ba8 100644 --- a/core/database_util_test.go +++ b/core/database_util_test.go @@ -430,7 +430,7 @@ func TestReceiptStorage(t *testing.T) { rlpHave, _ := rlp.EncodeToBytes(r) rlpWant, _ := rlp.EncodeToBytes(receipt) - if bytes.Compare(rlpHave, rlpWant) != 0 { + if !bytes.Equal(rlpHave, rlpWant) { t.Fatalf("receipt #%d [%x]: receipt mismatch: have %v, want %v", i, receipt.TxHash, r, receipt) } } @@ -488,7 +488,7 @@ func TestBlockReceiptStorage(t *testing.T) { rlpHave, _ := rlp.EncodeToBytes(rs[i]) rlpWant, _ := rlp.EncodeToBytes(receipts[i]) - if bytes.Compare(rlpHave, rlpWant) != 0 { + if !bytes.Equal(rlpHave, rlpWant) { t.Fatalf("receipt #%d: receipt mismatch: have %v, want %v", i, rs[i], receipts[i]) } } diff --git a/core/state/iterator.go b/core/state/iterator.go index 14265b277a..a58a15ad39 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -123,7 +123,7 @@ func (it *NodeIterator) step() error { if !it.dataIt.Next() { it.dataIt = nil } - if bytes.Compare(account.CodeHash, emptyCodeHash) != 0 { + if !bytes.Equal(account.CodeHash, emptyCodeHash) { it.codeHash = common.BytesToHash(account.CodeHash) it.code, err = it.state.db.Get(account.CodeHash) if err != nil { diff --git a/core/state/sync_test.go b/core/state/sync_test.go index cb585f78c2..43d146e3aa 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -84,7 +84,7 @@ func checkStateAccounts(t *testing.T, db ethdb.Database, root common.Hash, accou if nonce := state.GetNonce(acc.address); nonce != acc.nonce { t.Errorf("account %d: nonce mismatch: have %v, want %v", i, nonce, acc.nonce) } - if code := state.GetCode(acc.address); bytes.Compare(code, acc.code) != 0 { + if code := state.GetCode(acc.address); !bytes.Equal(code, acc.code) { t.Errorf("account %d: code mismatch: have %x, want %x", i, code, acc.code) } } @@ -294,7 +294,7 @@ func TestIncompleteStateSync(t *testing.T) { // Skim through the accounts and make sure the root hash is not a code node codeHash := false for _, acc := range srcAccounts { - if bytes.Compare(root.Bytes(), crypto.Sha3(acc.code)) == 0 { + if root == crypto.Keccak256Hash(acc.code) { codeHash = true break } diff --git a/core/types/bloom9.go b/core/types/bloom9.go index bcca59907f..32aa47a41b 100644 --- a/core/types/bloom9.go +++ b/core/types/bloom9.go @@ -97,14 +97,8 @@ func CreateBloom(receipts Receipts) Bloom { func LogsBloom(logs []*Log) *big.Int { bin := new(big.Int) for _, log := range logs { - data := make([]common.Hash, len(log.Topics)) bin.Or(bin, bloom9(log.Address.Bytes())) - - for i, topic := range log.Topics { - data[i] = topic - } - - for _, b := range data { + for _, b := range log.Topics { bin.Or(bin, bloom9(b[:])) } } diff --git 
a/crypto/crypto_test.go b/crypto/crypto_test.go index 86a5823064..f42605d32b 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -217,7 +217,7 @@ func TestValidateSignatureValues(t *testing.T) { func checkhash(t *testing.T, name string, f func([]byte) []byte, msg, exp []byte) { sum := f(msg) - if bytes.Compare(exp, sum) != 0 { + if !bytes.Equal(exp, sum) { t.Fatalf("hash %s mismatch: want: %x have: %x", name, exp, sum) } } diff --git a/crypto/ecies/ecies.go b/crypto/ecies/ecies.go index 86a70261d2..b1a716c005 100644 --- a/crypto/ecies/ecies.go +++ b/crypto/ecies/ecies.go @@ -291,9 +291,8 @@ func Encrypt(rand io.Reader, pub *PublicKey, m, s1, s2 []byte) (ct []byte, err e // Decrypt decrypts an ECIES ciphertext. func (prv *PrivateKey) Decrypt(rand io.Reader, c, s1, s2 []byte) (m []byte, err error) { - if c == nil || len(c) == 0 { - err = ErrInvalidMessage - return + if len(c) == 0 { + return nil, ErrInvalidMessage } params := prv.PublicKey.Params if params == nil { diff --git a/eth/handler.go b/eth/handler.go index 771e69b8df..1de3f67e65 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -607,38 +607,16 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } case msg.Code == NewBlockHashesMsg: - // Retrieve and deserialize the remote new block hashes notification - type announce struct { - Hash common.Hash - Number uint64 - } - var announces = []announce{} - - if p.version < eth62 { - // We're running the old protocol, make block number unknown (0) - var hashes []common.Hash - if err := msg.Decode(&hashes); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - for _, hash := range hashes { - announces = append(announces, announce{hash, 0}) - } - } else { - // Otherwise extract both block hash and number - var request newBlockHashesData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - for _, block := range request { - announces = append(announces, announce{block.Hash, block.Number}) - } + var announces newBlockHashesData + if err := msg.Decode(&announces); err != nil { + return errResp(ErrDecode, "%v: %v", msg, err) } // Mark the hashes as present at the remote node for _, block := range announces { p.MarkBlock(block.Hash) } // Schedule all the unknown hashes for retrieval - unknown := make([]announce, 0, len(announces)) + unknown := make(newBlockHashesData, 0, len(announces)) for _, block := range announces { if !pm.blockchain.HasBlock(block.Hash) { unknown = append(unknown, block) diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index b802d347f6..8692a43bdc 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -388,10 +388,7 @@ func (s *Service) reportLatency(out *json.Encoder) error { "latency": strconv.Itoa(int((time.Since(start) / time.Duration(2)).Nanoseconds() / 1000000)), }}, } - if err := out.Encode(latency); err != nil { - return err - } - return nil + return out.Encode(latency) } // blockStats is the information to report about individual blocks. 
@@ -440,10 +437,7 @@ func (s *Service) reportBlock(out *json.Encoder, block *types.Block) error { report := map[string][]interface{}{ "emit": {"block", stats}, } - if err := out.Encode(report); err != nil { - return err - } - return nil + return out.Encode(report) } // assembleBlockStats retrieves any required metadata to report a single block @@ -497,9 +491,7 @@ func (s *Service) reportHistory(out *json.Encoder, list []uint64) error { indexes := make([]uint64, 0, historyUpdateRange) if len(list) > 0 { // Specific indexes requested, send them back in particular - for _, idx := range list { - indexes = append(indexes, idx) - } + indexes = append(indexes, list...) } else { // No indexes requested, send back the top ones var head *types.Header @@ -533,10 +525,7 @@ func (s *Service) reportHistory(out *json.Encoder, list []uint64) error { report := map[string][]interface{}{ "emit": {"history", stats}, } - if err := out.Encode(report); err != nil { - return err - } - return nil + return out.Encode(report) } // pendStats is the information to report about pending transactions. @@ -564,10 +553,7 @@ func (s *Service) reportPending(out *json.Encoder) error { report := map[string][]interface{}{ "emit": {"pending", stats}, } - if err := out.Encode(report); err != nil { - return err - } - return nil + return out.Encode(report) } // blockStats is the information to report about the local node. @@ -618,8 +604,5 @@ func (s *Service) reportStats(out *json.Encoder) error { report := map[string][]interface{}{ "emit": {"stats", stats}, } - if err := out.Encode(report); err != nil { - return err - } - return nil + return out.Encode(report) } diff --git a/miner/worker.go b/miner/worker.go index 9e70c8f046..68ce44db09 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -449,7 +449,7 @@ func (self *worker) commitNewWork() { // Depending whether we support or oppose the fork, override differently if self.config.DAOForkSupport { header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } else if bytes.Compare(header.Extra, params.DAOForkBlockExtra) == 0 { + } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data } } diff --git a/mobile/bind.go b/mobile/bind.go index a25c37acad..bc4eb25bad 100644 --- a/mobile/bind.go +++ b/mobile/bind.go @@ -114,17 +114,12 @@ type BoundContract struct { // DeployContract deploys a contract onto the Ethereum blockchain and binds the // deployment address with a wrapper. func DeployContract(opts *TransactOpts, abiJSON string, bytecode []byte, client *EthereumClient, args *Interfaces) (contract *BoundContract, _ error) { - // Convert all the deployment parameters to Go types - params := make([]interface{}, len(args.objects)) - for i, obj := range args.objects { - params[i] = obj - } // Deploy the contract to the network parsed, err := abi.JSON(strings.NewReader(abiJSON)) if err != nil { return nil, err } - addr, tx, bound, err := bind.DeployContract(&opts.opts, parsed, bytecode, client.client, params...) + addr, tx, bound, err := bind.DeployContract(&opts.opts, parsed, bytecode, client.client, args.objects...) if err != nil { return nil, err } @@ -159,32 +154,18 @@ func (c *BoundContract) GetDeployer() *Transaction { // Call invokes the (constant) contract method with params as input values and // sets the output to result. 
func (c *BoundContract) Call(opts *CallOpts, out *Interfaces, method string, args *Interfaces) error { - // Convert all the input and output parameters to Go types - params := make([]interface{}, len(args.objects)) - for i, obj := range args.objects { - params[i] = obj - } results := make([]interface{}, len(out.objects)) - for i, obj := range out.objects { - results[i] = obj - } - // Execute the call to the contract and wrap any results - if err := c.contract.Call(&opts.opts, &results, method, params...); err != nil { + copy(results, out.objects) + if err := c.contract.Call(&opts.opts, &results, method, args.objects...); err != nil { return err } - for i, res := range results { - out.objects[i] = res - } + copy(out.objects, results) return nil } // Transact invokes the (paid) contract method with params as input values. func (c *BoundContract) Transact(opts *TransactOpts, method string, args *Interfaces) (tx *Transaction, _ error) { - params := make([]interface{}, len(args.objects)) - for i, obj := range args.objects { - params[i] = obj - } - rawTx, err := c.contract.Transact(&opts.opts, method, params) + rawTx, err := c.contract.Transact(&opts.opts, method, args.objects) if err != nil { return nil, err } diff --git a/node/config_test.go b/node/config_test.go index b258d2a8b8..d18732fdb0 100644 --- a/node/config_test.go +++ b/node/config_test.go @@ -137,7 +137,7 @@ func TestNodeKeyPersistency(t *testing.T) { if err != nil { t.Fatalf("failed to read previously persisted node key: %v", err) } - if bytes.Compare(blob1, blob2) != 0 { + if !bytes.Equal(blob1, blob2) { t.Fatalf("persisted node key mismatch: have %x, want %x", blob2, blob1) } diff --git a/node/node_test.go b/node/node_test.go index 6b2b62d73b..95fa53b067 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -218,7 +218,7 @@ func TestServiceRestarts(t *testing.T) { } defer stack.Stop() - if running != true || started != 1 { + if running || started != 1 { t.Fatalf("running/started mismatch: have %v/%d, want true/1", running, started) } // Restart the stack a few times and check successful service restarts @@ -227,7 +227,7 @@ func TestServiceRestarts(t *testing.T) { t.Fatalf("iter %d: failed to restart stack: %v", i, err) } } - if running != true || started != 4 { + if !running || started != 4 { t.Fatalf("running/started mismatch: have %v/%d, want true/4", running, started) } } diff --git a/p2p/discover/database.go b/p2p/discover/database.go index d6ea507bb6..8d20d1ec74 100644 --- a/p2p/discover/database.go +++ b/p2p/discover/database.go @@ -258,7 +258,7 @@ func (db *nodeDB) expireNodes() error { continue } // Skip the node if not expired yet (and not self) - if bytes.Compare(id[:], db.self[:]) != 0 { + if !bytes.Equal(id[:], db.self[:]) { if seen := db.lastPong(id); seen.After(threshold) { continue } diff --git a/p2p/discover/node.go b/p2p/discover/node.go index eec0bae0c4..8b1062d87b 100644 --- a/p2p/discover/node.go +++ b/p2p/discover/node.go @@ -224,11 +224,8 @@ func (n NodeID) GoString() string { // HexID converts a hex string to a NodeID. // The string may be prefixed with 0x. 
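Two more cleanups of the same kind sit around this point: the Call rewrite just above shuttles values between the []interface{} slices with the copy builtin instead of hand-written loops, and the HexID and prepInt changes below replace a HasPrefix check plus manual slicing with strings.TrimPrefix, which returns its input unchanged when the prefix is absent. A standalone sketch of both idioms (illustrative only, not part of the patch):

package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// copy works between any two slices with the same element type, so a
	// []interface{} -> []interface{} transfer needs no index loop.
	src := []interface{}{"a", 2, true}
	dst := make([]interface{}, len(src))
	copy(dst, src)
	fmt.Println(dst) // [a 2 true]

	// TrimPrefix is a no-op when the prefix is missing, so one call handles
	// both the "0x"-prefixed and the bare hex form.
	for _, in := range []string{"0xdeadbeef", "deadbeef"} {
		b, err := hex.DecodeString(strings.TrimPrefix(in, "0x"))
		fmt.Printf("%q -> %x (err: %v)\n", in, b, err)
	}
}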
func HexID(in string) (NodeID, error) { - if strings.HasPrefix(in, "0x") { - in = in[2:] - } var id NodeID - b, err := hex.DecodeString(in) + b, err := hex.DecodeString(strings.TrimPrefix(in, "0x")) if err != nil { return id, err } else if len(b) != len(id) { diff --git a/p2p/discv5/database.go b/p2p/discv5/database.go index 7c47c27fd0..44be8a74ec 100644 --- a/p2p/discv5/database.go +++ b/p2p/discv5/database.go @@ -269,7 +269,7 @@ func (db *nodeDB) expireNodes() error { continue } // Skip the node if not expired yet (and not self) - if bytes.Compare(id[:], db.self[:]) != 0 { + if !bytes.Equal(id[:], db.self[:]) { if seen := db.lastPong(id); seen.After(threshold) { continue } diff --git a/p2p/discv5/node.go b/p2p/discv5/node.go index b2025ebcbb..cfc833ff55 100644 --- a/p2p/discv5/node.go +++ b/p2p/discv5/node.go @@ -262,11 +262,8 @@ func (n NodeID) GoString() string { // HexID converts a hex string to a NodeID. // The string may be prefixed with 0x. func HexID(in string) (NodeID, error) { - if strings.HasPrefix(in, "0x") { - in = in[2:] - } var id NodeID - b, err := hex.DecodeString(in) + b, err := hex.DecodeString(strings.TrimPrefix(in, "0x")) if err != nil { return id, err } else if len(b) != len(id) { diff --git a/swarm/storage/dbstore.go b/swarm/storage/dbstore.go index 5ecc5c5006..4ddebb0217 100644 --- a/swarm/storage/dbstore.go +++ b/swarm/storage/dbstore.go @@ -354,7 +354,7 @@ func (s *DbStore) Get(key Key) (chunk *Chunk, err error) { hasher := s.hashfunc() hasher.Write(data) hash := hasher.Sum(nil) - if bytes.Compare(hash, key) != 0 { + if !bytes.Equal(hash, key) { s.db.Delete(getDataKey(index.Idx)) err = fmt.Errorf("invalid chunk. hash=%x, key=%v", hash, key[:]) return diff --git a/swarm/storage/types.go b/swarm/storage/types.go index 0dcbc01008..c365220122 100644 --- a/swarm/storage/types.go +++ b/swarm/storage/types.go @@ -41,7 +41,7 @@ func (x Key) Size() uint { } func (x Key) isEqual(y Key) bool { - return bytes.Compare(x, y) == 0 + return bytes.Equal(x, y) } func (h Key) bits(i, j uint) uint { diff --git a/tests/block_test_util.go b/tests/block_test_util.go index f043295466..ea63c99969 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -552,9 +552,7 @@ func LoadBlockTests(file string) (map[string]*BlockTest, error) { // Nothing to see here, please move along... func prepInt(base int, s string) string { if base == 16 { - if strings.HasPrefix(s, "0x") { - s = s[2:] - } + s = strings.TrimPrefix(s, "0x") if len(s) == 0 { s = "00" } diff --git a/tests/init.go b/tests/init.go index 361be5f62d..7b0924bc38 100644 --- a/tests/init.go +++ b/tests/init.go @@ -87,11 +87,7 @@ func readJsonHttp(uri string, value interface{}) error { } defer resp.Body.Close() - err = readJson(resp.Body, value) - if err != nil { - return err - } - return nil + return readJson(resp.Body, value) } func readJsonFile(fn string, value interface{}) error { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 8221815a88..7841aecfea 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -159,7 +159,7 @@ func runStateTest(chainConfig *params.ChainConfig, test VmTest) error { } else { rexp = common.FromHex(test.Out) } - if bytes.Compare(rexp, ret) != 0 { + if !bytes.Equal(rexp, ret) { return fmt.Errorf("return failed. 
Expected %x, got %x\n", rexp, ret) } diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go index d6411147fe..f3b9fd1c91 100644 --- a/tests/vm_test_util.go +++ b/tests/vm_test_util.go @@ -172,7 +172,7 @@ func runVmTest(test VmTest) error { // Compare expected and actual return rexp := common.FromHex(test.Out) - if bytes.Compare(rexp, ret) != 0 { + if !bytes.Equal(rexp, ret) { return fmt.Errorf("return failed. Expected %x, got %x\n", rexp, ret) } diff --git a/trie/sync_test.go b/trie/sync_test.go index 6405a51c32..4168c4d658 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -67,7 +67,7 @@ func checkTrieContents(t *testing.T, db Database, root []byte, content map[strin t.Fatalf("inconsistent trie at %x: %v", root, err) } for key, val := range content { - if have := trie.Get([]byte(key)); bytes.Compare(have, val) != 0 { + if have := trie.Get([]byte(key)); !bytes.Equal(have, val) { t.Errorf("entry %x: content mismatch: have %x, want %x", key, have, val) } } diff --git a/whisper/shhapi/api.go b/whisper/shhapi/api.go index 24d54b6533..379bb90d3d 100644 --- a/whisper/shhapi/api.go +++ b/whisper/shhapi/api.go @@ -178,14 +178,10 @@ func (api *PublicWhisperAPI) NewFilter(args WhisperFilterArgs) (uint32, error) { Messages: make(map[common.Hash]*whisperv5.ReceivedMessage), AcceptP2P: args.AcceptP2P, } - if len(filter.KeySym) > 0 { filter.SymKeyHash = crypto.Keccak256Hash(filter.KeySym) } - - for _, t := range args.Topics { - filter.Topics = append(filter.Topics, t) - } + filter.Topics = append(filter.Topics, args.Topics...) if len(args.Topics) == 0 { info := "NewFilter: at least one topic must be specified" diff --git a/whisper/shhapi/api_test.go b/whisper/shhapi/api_test.go index d2890a9a3a..60b6fbd041 100644 --- a/whisper/shhapi/api_test.go +++ b/whisper/shhapi/api_test.go @@ -253,7 +253,7 @@ func TestUnmarshalPostArgs(t *testing.T) { if a.FilterID != 64 { t.Fatalf("wrong FilterID: %d.", a.FilterID) } - if bytes.Compare(a.PeerID[:], a.Topic[:]) != 0 { + if !bytes.Equal(a.PeerID[:], a.Topic[:]) { t.Fatalf("wrong PeerID: %x.", a.PeerID) } } diff --git a/whisper/whisperv2/envelope_test.go b/whisper/whisperv2/envelope_test.go index 75e2fbe8a6..c1b128c617 100644 --- a/whisper/whisperv2/envelope_test.go +++ b/whisper/whisperv2/envelope_test.go @@ -40,10 +40,10 @@ func TestEnvelopeOpen(t *testing.T) { if opened.Flags != message.Flags { t.Fatalf("flags mismatch: have %d, want %d", opened.Flags, message.Flags) } - if bytes.Compare(opened.Signature, message.Signature) != 0 { + if !bytes.Equal(opened.Signature, message.Signature) { t.Fatalf("signature mismatch: have 0x%x, want 0x%x", opened.Signature, message.Signature) } - if bytes.Compare(opened.Payload, message.Payload) != 0 { + if !bytes.Equal(opened.Payload, message.Payload) { t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, message.Payload) } if opened.Sent.Unix() != message.Sent.Unix() { @@ -71,7 +71,7 @@ func TestEnvelopeAnonymousOpenUntargeted(t *testing.T) { if opened.To != nil { t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) } - if bytes.Compare(opened.Payload, payload) != 0 { + if !bytes.Equal(opened.Payload, payload) { t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) } } @@ -96,7 +96,7 @@ func TestEnvelopeAnonymousOpenTargeted(t *testing.T) { if opened.To != nil { t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) } - if bytes.Compare(opened.Payload, payload) == 0 { + if bytes.Equal(opened.Payload, payload) { t.Fatalf("payload match, should have been encrypted: 
0x%x", opened.Payload) } } @@ -127,7 +127,7 @@ func TestEnvelopeIdentifiedOpenUntargeted(t *testing.T) { if opened.To != nil { t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) } - if bytes.Compare(opened.Payload, payload) != 0 { + if !bytes.Equal(opened.Payload, payload) { t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) } } @@ -152,7 +152,7 @@ func TestEnvelopeIdentifiedOpenTargeted(t *testing.T) { if opened.To != nil { t.Fatalf("recipient mismatch: have 0x%x, want nil", opened.To) } - if bytes.Compare(opened.Payload, payload) != 0 { + if !bytes.Equal(opened.Payload, payload) { t.Fatalf("payload mismatch: have 0x%x, want 0x%x", opened.Payload, payload) } } diff --git a/whisper/whisperv2/filter.go b/whisper/whisperv2/filter.go index ed7dcd6aed..7404859b78 100644 --- a/whisper/whisperv2/filter.go +++ b/whisper/whisperv2/filter.go @@ -120,10 +120,7 @@ func (self filterer) Compare(f filter.Filter) bool { break } } - if !self.matcher.Matches(topics) { - return false - } - return true + return self.matcher.Matches(topics) } // Trigger is called when a filter successfully matches an inbound message. diff --git a/whisper/whisperv2/filter_test.go b/whisper/whisperv2/filter_test.go index 5a14a84bbd..ffdfd7b349 100644 --- a/whisper/whisperv2/filter_test.go +++ b/whisper/whisperv2/filter_test.go @@ -91,7 +91,7 @@ func TestFilterTopicsCreation(t *testing.T) { continue } for k := 0; k < len(condition); k++ { - if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 { + if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) { t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k]) } } @@ -115,7 +115,7 @@ func TestFilterTopicsCreation(t *testing.T) { continue } for k := 0; k < len(condition); k++ { - if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 { + if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) { t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k]) } } @@ -135,7 +135,7 @@ func TestFilterTopicsCreation(t *testing.T) { continue } for k := 0; k < len(condition); k++ { - if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 { + if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) { t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k]) } } @@ -156,7 +156,7 @@ func TestFilterTopicsCreation(t *testing.T) { continue } for k := 0; k < len(condition); k++ { - if bytes.Compare(condition[k][:], tt.filter[j][k][:]) != 0 { + if !bytes.Equal(condition[k][:], tt.filter[j][k][:]) { t.Errorf("test %d, condition %d, segment %d: filter mismatch: have 0x%x, want 0x%x", i, j, k, condition[k], tt.filter[j][k]) } } diff --git a/whisper/whisperv2/message_test.go b/whisper/whisperv2/message_test.go index efa64e431a..d3b307d2ab 100644 --- a/whisper/whisperv2/message_test.go +++ b/whisper/whisperv2/message_test.go @@ -40,7 +40,7 @@ func TestMessageSimpleWrap(t *testing.T) { if len(msg.Signature) != 0 { t.Fatalf("signature found for simple wrapping: 0x%x", msg.Signature) } - if bytes.Compare(msg.Payload, payload) != 0 { + if !bytes.Equal(msg.Payload, payload) { t.Fatalf("payload mismatch after wrapping: have 0x%x, want 0x%x", msg.Payload, payload) } if msg.TTL/time.Second != DefaultTTL/time.Second { @@ -65,7 +65,7 @@ func TestMessageCleartextSignRecover(t *testing.T) { if msg.Flags&signatureFlag != signatureFlag { t.Fatalf("signature flag mismatch: have %d, 
want %d", msg.Flags&signatureFlag, signatureFlag) } - if bytes.Compare(msg.Payload, payload) != 0 { + if !bytes.Equal(msg.Payload, payload) { t.Fatalf("payload mismatch after signing: have 0x%x, want 0x%x", msg.Payload, payload) } diff --git a/whisper/whisperv2/topic_test.go b/whisper/whisperv2/topic_test.go index 66c84ba352..bb65689963 100644 --- a/whisper/whisperv2/topic_test.go +++ b/whisper/whisperv2/topic_test.go @@ -33,13 +33,13 @@ func TestTopicCreation(t *testing.T) { // Create the topics individually for i, tt := range topicCreationTests { topic := NewTopic(tt.data) - if bytes.Compare(topic[:], tt.hash[:]) != 0 { + if !bytes.Equal(topic[:], tt.hash[:]) { t.Errorf("binary test %d: hash mismatch: have %v, want %v.", i, topic, tt.hash) } } for i, tt := range topicCreationTests { topic := NewTopicFromString(string(tt.data)) - if bytes.Compare(topic[:], tt.hash[:]) != 0 { + if !bytes.Equal(topic[:], tt.hash[:]) { t.Errorf("textual test %d: hash mismatch: have %v, want %v.", i, topic, tt.hash) } } @@ -55,13 +55,13 @@ func TestTopicCreation(t *testing.T) { topics := NewTopics(binaryData...) for i, tt := range topicCreationTests { - if bytes.Compare(topics[i][:], tt.hash[:]) != 0 { + if !bytes.Equal(topics[i][:], tt.hash[:]) { t.Errorf("binary batch test %d: hash mismatch: have %v, want %v.", i, topics[i], tt.hash) } } topics = NewTopicsFromStrings(textualData...) for i, tt := range topicCreationTests { - if bytes.Compare(topics[i][:], tt.hash[:]) != 0 { + if !bytes.Equal(topics[i][:], tt.hash[:]) { t.Errorf("textual batch test %d: hash mismatch: have %v, want %v.", i, topics[i], tt.hash) } } diff --git a/whisper/whisperv5/message_test.go b/whisper/whisperv5/message_test.go index 5cbc9182f2..3eb71653df 100644 --- a/whisper/whisperv5/message_test.go +++ b/whisper/whisperv5/message_test.go @@ -104,10 +104,10 @@ func singleMessageTest(t *testing.T, symmetric bool) { } padsz := len(decrypted.Padding) - if bytes.Compare(steg[:padsz], decrypted.Padding) != 0 { + if !bytes.Equal(steg[:padsz], decrypted.Padding) { t.Fatalf("failed with seed %d: compare padding.", seed) } - if bytes.Compare(text, decrypted.Payload) != 0 { + if !bytes.Equal(text, decrypted.Payload) { t.Fatalf("failed with seed %d: compare payload.", seed) } if !isMessageSigned(decrypted.Raw[0]) { @@ -256,10 +256,10 @@ func singleEnvelopeOpenTest(t *testing.T, symmetric bool) { } padsz := len(decrypted.Padding) - if bytes.Compare(steg[:padsz], decrypted.Padding) != 0 { + if !bytes.Equal(steg[:padsz], decrypted.Padding) { t.Fatalf("failed with seed %d: compare padding.", seed) } - if bytes.Compare(text, decrypted.Payload) != 0 { + if !bytes.Equal(text, decrypted.Payload) { t.Fatalf("failed with seed %d: compare payload.", seed) } if !isMessageSigned(decrypted.Raw[0]) { diff --git a/whisper/whisperv5/peer_test.go b/whisper/whisperv5/peer_test.go index 88da59bff9..34e2ec255f 100644 --- a/whisper/whisperv5/peer_test.go +++ b/whisper/whisperv5/peer_test.go @@ -207,7 +207,7 @@ func checkPropagation(t *testing.T) { func validateMail(t *testing.T, index int, mail []*ReceivedMessage) bool { var cnt int for _, m := range mail { - if bytes.Compare(m.Payload, expectedMessage) == 0 { + if bytes.Equal(m.Payload, expectedMessage) { cnt++ } } diff --git a/whisper/whisperv5/whisper_test.go b/whisper/whisperv5/whisper_test.go index 9af95f4453..dbe0627fa5 100644 --- a/whisper/whisperv5/whisper_test.go +++ b/whisper/whisperv5/whisper_test.go @@ -239,7 +239,7 @@ func TestWhisperSymKeyManagement(t *testing.T) { if k1 == nil { t.Fatalf("first key does not 
exist.") } - if bytes.Compare(k1, randomKey) == 0 { + if bytes.Equal(k1, randomKey) { t.Fatalf("k1 == randomKey.") } if k2 != nil { @@ -264,10 +264,10 @@ func TestWhisperSymKeyManagement(t *testing.T) { if k2 == nil { t.Fatalf("k2 does not exist.") } - if bytes.Compare(k1, k2) == 0 { + if bytes.Equal(k1, k2) { t.Fatalf("k1 == k2.") } - if bytes.Compare(k1, randomKey) == 0 { + if bytes.Equal(k1, randomKey) { t.Fatalf("k1 == randomKey.") } if len(k1) != aesKeyLength { From 13e3b2f433c48fe81423c1a13e9a5194ece61b01 Mon Sep 17 00:00:00 2001 From: Felix Lange Date: Fri, 6 Jan 2017 16:55:03 +0100 Subject: [PATCH 3/3] logger, pow/dagger, pow/ezp: delete dead code --- cmd/geth/main.go | 1 - cmd/utils/cmd.go | 1 - core/block_validator_test.go | 6 +- core/blockchain.go | 3 - errs/errors.go | 22 -- errs/errors_test.go | 21 -- logger/example_test.go | 37 ---- logger/log.go | 65 ------ logger/loggers.go | 149 -------------- logger/loggers_test.go | 192 ------------------ logger/logsystem.go | 76 ------- logger/sys.go | 142 ------------- logger/types.go | 381 ----------------------------------- miner/worker.go | 2 - node/node_test.go | 2 +- p2p/server.go | 12 -- pow/dagger/dagger.go | 176 ---------------- pow/dagger/dagger_test.go | 35 ---- pow/ezp/pow.go | 113 ----------- swarm/network/protocol.go | 7 - 20 files changed, 3 insertions(+), 1440 deletions(-) delete mode 100644 logger/example_test.go delete mode 100644 logger/log.go delete mode 100644 logger/loggers.go delete mode 100644 logger/loggers_test.go delete mode 100644 logger/logsystem.go delete mode 100644 logger/sys.go delete mode 100644 logger/types.go delete mode 100644 pow/dagger/dagger.go delete mode 100644 pow/dagger/dagger_test.go delete mode 100644 pow/ezp/pow.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 332e1ae8d5..766e49f49d 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -168,7 +168,6 @@ func init() { } app.After = func(ctx *cli.Context) error { - logger.Flush() debug.Exit() console.Stdin.Close() // Resets terminal mode. return nil diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 287efc9c83..8666f37756 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -67,7 +67,6 @@ func Fatalf(format string, args ...interface{}) { } } fmt.Fprintf(w, "Fatal: "+format+"\n", args...) 
- logger.Flush() os.Exit(1) } diff --git a/core/block_validator_test.go b/core/block_validator_test.go index 0546866125..413c3cc8ec 100644 --- a/core/block_validator_test.go +++ b/core/block_validator_test.go @@ -27,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/pow/ezp" ) func testChainConfig() *params.ChainConfig { @@ -48,20 +47,19 @@ func proc() (Validator, *BlockChain) { } func TestNumber(t *testing.T) { - pow := ezp.New() _, chain := proc() statedb, _ := state.New(chain.Genesis().Root(), chain.chainDb) cfg := testChainConfig() header := makeHeader(cfg, chain.Genesis(), statedb) header.Number = big.NewInt(3) - err := ValidateHeader(cfg, pow, header, chain.Genesis().Header(), false, false) + err := ValidateHeader(cfg, FakePow{}, header, chain.Genesis().Header(), false, false) if err != BlockNumberErr { t.Errorf("expected block number error, got %q", err) } header = makeHeader(cfg, chain.Genesis(), statedb) - err = ValidateHeader(cfg, pow, header, chain.Genesis().Header(), false, false) + err = ValidateHeader(cfg, FakePow{}, header, chain.Genesis().Header(), false, false) if err == BlockNumberErr { t.Errorf("didn't expect block number error") } diff --git a/core/blockchain.go b/core/blockchain.go index 8c2be72313..c3530b93c3 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -46,9 +46,6 @@ import ( ) var ( - chainlogger = logger.NewLogger("CHAIN") - jsonlogger = logger.NewJsonLogger() - blockInsertTimer = metrics.NewTimer("chain/inserts") ErrNoGenesis = errors.New("Genesis not found in chain") diff --git a/errs/errors.go b/errs/errors.go index 675649efa2..daa814db71 100644 --- a/errs/errors.go +++ b/errs/errors.go @@ -19,7 +19,6 @@ package errs import ( "fmt" - "github.com/ethereum/go-ethereum/logger" "github.com/ethereum/go-ethereum/logger/glog" ) @@ -32,15 +31,10 @@ Fields: Package: name of the package/component - - Level: - a function mapping error code to logger.LogLevel (severity) - if not given, errors default to logger.InfoLevel */ type Errors struct { Errors map[int]string Package string - Level func(code int) logger.LogLevel } /* @@ -58,7 +52,6 @@ type Error struct { Code int Name string Package string - level logger.LogLevel message string format string params []interface{} @@ -69,15 +62,10 @@ func (self *Errors) New(code int, format string, params ...interface{}) *Error { if !ok { panic("invalid error code") } - level := logger.InfoLevel - if self.Level != nil { - level = self.Level(code) - } return &Error{ Code: code, Name: name, Package: self.Package, - level: level, format: format, params: params, } @@ -98,13 +86,3 @@ func (self Error) Log(v glog.Verbose) { v.Infoln(self) } } - -/* -err.Fatal() is true if err's severity level is 0 or 1 (logger.ErrorLevel or logger.Silence) -*/ -func (self *Error) Fatal() (fatal bool) { - if self.level < logger.WarnLevel { - fatal = true - } - return -} diff --git a/errs/errors_test.go b/errs/errors_test.go index d6d14b45ea..5a2ffbec32 100644 --- a/errs/errors_test.go +++ b/errs/errors_test.go @@ -19,8 +19,6 @@ package errs import ( "fmt" "testing" - - "github.com/ethereum/go-ethereum/logger" ) func testErrors() *Errors { @@ -30,14 +28,6 @@ func testErrors() *Errors { 0: "zero", 1: "one", }, - Level: func(i int) (l logger.LogLevel) { - if i == 0 { - l = logger.ErrorLevel - } else { - l = logger.WarnLevel - } - return - }, } } @@ -49,14 +39,3 @@ func TestErrorMessage(t *testing.T) { t.Errorf("error message 
incorrect. expected %v, got %v", exp, message) } } - -func TestErrorSeverity(t *testing.T) { - err0 := testErrors().New(0, "zero detail") - if !err0.Fatal() { - t.Errorf("error should be fatal") - } - err1 := testErrors().New(1, "one detail") - if err1.Fatal() { - t.Errorf("error should not be fatal") - } -} diff --git a/logger/example_test.go b/logger/example_test.go deleted file mode 100644 index ce5f9da67f..0000000000 --- a/logger/example_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package logger - -import "os" - -func ExampleLogger() { - logger := NewLogger("TAG") - logger.Infoln("so awesome") // prints [TAG] so awesome - logger.Infof("this %q is raw", "coin") // prints [TAG] this "coin" is raw -} - -func ExampleLogSystem() { - filename := "test.log" - file, _ := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm) - fileLog := NewStdLogSystem(file, 0, WarnLevel) - AddLogSystem(fileLog) - - stdoutLog := NewStdLogSystem(os.Stdout, 0, WarnLevel) - AddLogSystem(stdoutLog) - - NewLogger("TAG").Warnln("reactor meltdown") // writes to both logs -} diff --git a/logger/log.go b/logger/log.go deleted file mode 100644 index 38a6ce1391..0000000000 --- a/logger/log.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package logger - -import ( - "fmt" - "io" - "log" - "os" - - "github.com/ethereum/go-ethereum/common" -) - -func openLogFile(datadir string, filename string) *os.File { - path := common.AbsolutePath(datadir, filename) - file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - panic(fmt.Sprintf("error opening log file '%s': %v", filename, err)) - } - return file -} - -func New(datadir string, logFile string, logLevel int) LogSystem { - var writer io.Writer - if logFile == "" { - writer = os.Stdout - } else { - writer = openLogFile(datadir, logFile) - } - - var sys LogSystem - sys = NewStdLogSystem(writer, log.LstdFlags, LogLevel(logLevel)) - AddLogSystem(sys) - - return sys -} - -func NewJSONsystem(datadir string, logFile string) LogSystem { - var writer io.Writer - if logFile == "-" { - writer = os.Stdout - } else { - writer = openLogFile(datadir, logFile) - } - - var sys LogSystem - sys = NewJsonLogSystem(writer) - AddLogSystem(sys) - - return sys -} diff --git a/logger/loggers.go b/logger/loggers.go deleted file mode 100644 index e63355d0bf..0000000000 --- a/logger/loggers.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -/* -Package logger implements a multi-output leveled logger. - -Other packages use tagged logger to send log messages to shared -(process-wide) logging engine. The shared logging engine dispatches to -multiple log systems. The log level can be set separately per log -system. - -Logging is asynchronous and does not block the caller. Message -formatting is performed by the caller goroutine to avoid incorrect -logging of mutable state. -*/ -package logger - -import ( - "encoding/json" - "fmt" - "os" -) - -type LogLevel uint32 - -const ( - // Standard log levels - Silence LogLevel = iota - ErrorLevel - WarnLevel - InfoLevel - DebugLevel - DebugDetailLevel -) - -// A Logger prints messages prefixed by a given tag. It provides named -// Printf and Println style methods for all loglevels. Each ethereum -// component should have its own logger with a unique prefix. -type Logger struct { - tag string -} - -func NewLogger(tag string) *Logger { - return &Logger{"[" + tag + "] "} -} - -func (logger *Logger) Sendln(level LogLevel, v ...interface{}) { - logMessageC <- stdMsg{level, logger.tag + fmt.Sprintln(v...)} -} - -func (logger *Logger) Sendf(level LogLevel, format string, v ...interface{}) { - logMessageC <- stdMsg{level, logger.tag + fmt.Sprintf(format, v...)} -} - -// Errorln writes a message with ErrorLevel. -func (logger *Logger) Errorln(v ...interface{}) { - logger.Sendln(ErrorLevel, v...) -} - -// Warnln writes a message with WarnLevel. -func (logger *Logger) Warnln(v ...interface{}) { - logger.Sendln(WarnLevel, v...) -} - -// Infoln writes a message with InfoLevel. 
-func (logger *Logger) Infoln(v ...interface{}) { - logger.Sendln(InfoLevel, v...) -} - -// Debugln writes a message with DebugLevel. -func (logger *Logger) Debugln(v ...interface{}) { - logger.Sendln(DebugLevel, v...) -} - -// DebugDetailln writes a message with DebugDetailLevel. -func (logger *Logger) DebugDetailln(v ...interface{}) { - logger.Sendln(DebugDetailLevel, v...) -} - -// Errorf writes a message with ErrorLevel. -func (logger *Logger) Errorf(format string, v ...interface{}) { - logger.Sendf(ErrorLevel, format, v...) -} - -// Warnf writes a message with WarnLevel. -func (logger *Logger) Warnf(format string, v ...interface{}) { - logger.Sendf(WarnLevel, format, v...) -} - -// Infof writes a message with InfoLevel. -func (logger *Logger) Infof(format string, v ...interface{}) { - logger.Sendf(InfoLevel, format, v...) -} - -// Debugf writes a message with DebugLevel. -func (logger *Logger) Debugf(format string, v ...interface{}) { - logger.Sendf(DebugLevel, format, v...) -} - -// DebugDetailf writes a message with DebugDetailLevel. -func (logger *Logger) DebugDetailf(format string, v ...interface{}) { - logger.Sendf(DebugDetailLevel, format, v...) -} - -// Fatalln writes a message with ErrorLevel and exits the program. -func (logger *Logger) Fatalln(v ...interface{}) { - logger.Sendln(ErrorLevel, v...) - Flush() - os.Exit(0) -} - -// Fatalf writes a message with ErrorLevel and exits the program. -func (logger *Logger) Fatalf(format string, v ...interface{}) { - logger.Sendf(ErrorLevel, format, v...) - Flush() - os.Exit(0) -} - -type JsonLogger struct { - Coinbase string -} - -func NewJsonLogger() *JsonLogger { - return &JsonLogger{} -} - -func (logger *JsonLogger) LogJson(v JsonLog) { - msgname := v.EventName() - obj := map[string]interface{}{ - msgname: v, - } - - jsontxt, _ := json.Marshal(obj) - logMessageC <- (jsonMsg(jsontxt)) - -} diff --git a/logger/loggers_test.go b/logger/loggers_test.go deleted file mode 100644 index 85564698bc..0000000000 --- a/logger/loggers_test.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package logger - -import ( - "io/ioutil" - "math/rand" - "os" - "sync" - "testing" - "time" -) - -type TestLogSystem struct { - mutex sync.Mutex - output string - level LogLevel -} - -func (ls *TestLogSystem) LogPrint(msg LogMsg) { - ls.mutex.Lock() - if ls.level >= msg.Level() { - ls.output += msg.String() - } - ls.mutex.Unlock() -} - -func (ls *TestLogSystem) SetLogLevel(i LogLevel) { - ls.mutex.Lock() - ls.level = i - ls.mutex.Unlock() -} - -func (ls *TestLogSystem) GetLogLevel() LogLevel { - ls.mutex.Lock() - defer ls.mutex.Unlock() - return ls.level -} - -func (ls *TestLogSystem) CheckOutput(t *testing.T, expected string) { - ls.mutex.Lock() - output := ls.output - ls.mutex.Unlock() - if output != expected { - t.Errorf("log output mismatch:\n got: %q\n want: %q\n", output, expected) - } -} - -type blockedLogSystem struct { - LogSystem - unblock chan struct{} -} - -func (ls blockedLogSystem) LogPrint(msg LogMsg) { - <-ls.unblock - ls.LogSystem.LogPrint(msg) -} - -func TestLoggerFlush(t *testing.T) { - Reset() - - logger := NewLogger("TEST") - ls := blockedLogSystem{&TestLogSystem{level: WarnLevel}, make(chan struct{})} - AddLogSystem(ls) - for i := 0; i < 5; i++ { - // these writes shouldn't hang even though ls is blocked - logger.Errorf(".") - } - - beforeFlush := time.Now() - time.AfterFunc(80*time.Millisecond, func() { close(ls.unblock) }) - Flush() // this should hang for approx. 80ms - if blockd := time.Now().Sub(beforeFlush); blockd < 80*time.Millisecond { - t.Errorf("Flush didn't block long enough, blocked for %v, should've been >= 80ms", blockd) - } - - ls.LogSystem.(*TestLogSystem).CheckOutput(t, "[TEST] .[TEST] .[TEST] .[TEST] .[TEST] .") -} - -func TestLoggerPrintln(t *testing.T) { - Reset() - - logger := NewLogger("TEST") - testLogSystem := &TestLogSystem{level: WarnLevel} - AddLogSystem(testLogSystem) - logger.Errorln("error") - logger.Warnln("warn") - logger.Infoln("info") - logger.Debugln("debug") - Flush() - - testLogSystem.CheckOutput(t, "[TEST] error\n[TEST] warn\n") -} - -func TestLoggerPrintf(t *testing.T) { - Reset() - - logger := NewLogger("TEST") - testLogSystem := &TestLogSystem{level: WarnLevel} - AddLogSystem(testLogSystem) - logger.Errorf("error to %v\n", []int{1, 2, 3}) - logger.Warnf("warn %%d %d", 5) - logger.Infof("info") - logger.Debugf("debug") - Flush() - testLogSystem.CheckOutput(t, "[TEST] error to [1 2 3]\n[TEST] warn %d 5") -} - -func TestMultipleLogSystems(t *testing.T) { - Reset() - - logger := NewLogger("TEST") - testLogSystem0 := &TestLogSystem{level: ErrorLevel} - testLogSystem1 := &TestLogSystem{level: WarnLevel} - AddLogSystem(testLogSystem0) - AddLogSystem(testLogSystem1) - logger.Errorln("error") - logger.Warnln("warn") - Flush() - - testLogSystem0.CheckOutput(t, "[TEST] error\n") - testLogSystem1.CheckOutput(t, "[TEST] error\n[TEST] warn\n") -} - -func TestFileLogSystem(t *testing.T) { - Reset() - - logger := NewLogger("TEST") - filename := "test.log" - file, _ := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm) - testLogSystem := NewStdLogSystem(file, 0, WarnLevel) - AddLogSystem(testLogSystem) - logger.Errorf("error to %s\n", filename) - logger.Warnln("warn") - Flush() - contents, _ := ioutil.ReadFile(filename) - output := string(contents) - if output != "[TEST] error to test.log\n[TEST] warn\n" { - t.Error("Expected contents of file 'test.log': '[TEST] error to test.log\\n[TEST] warn\\n', got ", output) - } else { - os.Remove(filename) - } -} - -func TestNoLogSystem(t *testing.T) { - Reset() - - logger := NewLogger("TEST") 
- logger.Warnln("warn") - Flush() -} - -func TestConcurrentAddSystem(t *testing.T) { - rand.Seed(time.Now().Unix()) - Reset() - - logger := NewLogger("TEST") - stop := make(chan struct{}) - writer := func() { - select { - case <-stop: - return - default: - logger.Infoln("foo") - Flush() - } - } - - go writer() - go writer() - - stopTime := time.Now().Add(100 * time.Millisecond) - for time.Now().Before(stopTime) { - time.Sleep(time.Duration(rand.Intn(20)) * time.Millisecond) - AddLogSystem(NewStdLogSystem(ioutil.Discard, 0, InfoLevel)) - } - close(stop) -} diff --git a/logger/logsystem.go b/logger/logsystem.go deleted file mode 100644 index 24f4351d49..0000000000 --- a/logger/logsystem.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package logger - -import ( - "io" - "log" - "sync/atomic" -) - -// LogSystem is implemented by log output devices. -// All methods can be called concurrently from multiple goroutines. -type LogSystem interface { - LogPrint(LogMsg) -} - -// NewStdLogSystem creates a LogSystem that prints to the given writer. -// The flag values are defined package log. -func NewStdLogSystem(writer io.Writer, flags int, level LogLevel) *StdLogSystem { - logger := log.New(writer, "", flags) - return &StdLogSystem{logger, uint32(level)} -} - -type StdLogSystem struct { - logger *log.Logger - level uint32 -} - -func (t *StdLogSystem) LogPrint(msg LogMsg) { - stdmsg, ok := msg.(stdMsg) - if ok { - if t.GetLogLevel() >= stdmsg.Level() { - t.logger.Print(stdmsg.String()) - } - } -} - -func (t *StdLogSystem) SetLogLevel(i LogLevel) { - atomic.StoreUint32(&t.level, uint32(i)) -} - -func (t *StdLogSystem) GetLogLevel() LogLevel { - return LogLevel(atomic.LoadUint32(&t.level)) -} - -// NewJSONLogSystem creates a LogSystem that prints to the given writer without -// adding extra information irrespective of loglevel only if message is JSON type -func NewJsonLogSystem(writer io.Writer) LogSystem { - logger := log.New(writer, "", 0) - return &jsonLogSystem{logger} -} - -type jsonLogSystem struct { - logger *log.Logger -} - -func (t *jsonLogSystem) LogPrint(msg LogMsg) { - jsonmsg, ok := msg.(jsonMsg) - if ok { - t.logger.Print(jsonmsg.String()) - } -} diff --git a/logger/sys.go b/logger/sys.go deleted file mode 100644 index 18d4ea641c..0000000000 --- a/logger/sys.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package logger - -import ( - "fmt" - "sync" -) - -type stdMsg struct { - level LogLevel - msg string -} - -type jsonMsg []byte - -func (m jsonMsg) Level() LogLevel { - return 0 -} - -func (m jsonMsg) String() string { - return string(m) -} - -type LogMsg interface { - Level() LogLevel - fmt.Stringer -} - -func (m stdMsg) Level() LogLevel { - return m.level -} - -func (m stdMsg) String() string { - return m.msg -} - -var ( - logMessageC = make(chan LogMsg) - addSystemC = make(chan LogSystem) - flushC = make(chan chan struct{}) - resetC = make(chan chan struct{}) -) - -func init() { - go dispatchLoop() -} - -// each system can buffer this many messages before -// blocking incoming log messages. -const sysBufferSize = 500 - -func dispatchLoop() { - var ( - systems []LogSystem - systemIn []chan LogMsg - systemWG sync.WaitGroup - ) - bootSystem := func(sys LogSystem) { - in := make(chan LogMsg, sysBufferSize) - systemIn = append(systemIn, in) - systemWG.Add(1) - go sysLoop(sys, in, &systemWG) - } - - for { - select { - case msg := <-logMessageC: - for _, c := range systemIn { - c <- msg - } - - case sys := <-addSystemC: - systems = append(systems, sys) - bootSystem(sys) - - case waiter := <-resetC: - // reset means terminate all systems - for _, c := range systemIn { - close(c) - } - systems = nil - systemIn = nil - systemWG.Wait() - close(waiter) - - case waiter := <-flushC: - // flush means reboot all systems - for _, c := range systemIn { - close(c) - } - systemIn = nil - systemWG.Wait() - for _, sys := range systems { - bootSystem(sys) - } - close(waiter) - } - } -} - -func sysLoop(sys LogSystem, in <-chan LogMsg, wg *sync.WaitGroup) { - for msg := range in { - sys.LogPrint(msg) - } - wg.Done() -} - -// Reset removes all active log systems. -// It blocks until all current messages have been delivered. -func Reset() { - waiter := make(chan struct{}) - resetC <- waiter - <-waiter -} - -// Flush waits until all current log messages have been dispatched to -// the active log systems. -func Flush() { - waiter := make(chan struct{}) - flushC <- waiter - <-waiter -} - -// AddLogSystem starts printing messages to the given LogSystem. -func AddLogSystem(sys LogSystem) { - addSystemC <- sys -} diff --git a/logger/types.go b/logger/types.go deleted file mode 100644 index ee7e845de8..0000000000 --- a/logger/types.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -package logger - -import ( - "math/big" - "time" -) - -type utctime8601 struct{} - -func (utctime8601) MarshalJSON() ([]byte, error) { - timestr := time.Now().UTC().Format(time.RFC3339Nano) - // Bounds check - if len(timestr) > 26 { - timestr = timestr[:26] - } - return []byte(`"` + timestr + `Z"`), nil -} - -type JsonLog interface { - EventName() string -} - -type LogEvent struct { - // Guid string `json:"guid"` - Ts utctime8601 `json:"ts"` - // Level string `json:"level"` -} - -type LogStarting struct { - ClientString string `json:"client_impl"` - ProtocolVersion int `json:"eth_version"` - LogEvent -} - -func (l *LogStarting) EventName() string { - return "starting" -} - -type P2PConnected struct { - RemoteId string `json:"remote_id"` - RemoteAddress string `json:"remote_addr"` - RemoteVersionString string `json:"remote_version_string"` - NumConnections int `json:"num_connections"` - LogEvent -} - -func (l *P2PConnected) EventName() string { - return "p2p.connected" -} - -type P2PDisconnected struct { - NumConnections int `json:"num_connections"` - RemoteId string `json:"remote_id"` - LogEvent -} - -func (l *P2PDisconnected) EventName() string { - return "p2p.disconnected" -} - -type EthMinerNewBlock struct { - BlockHash string `json:"block_hash"` - BlockNumber *big.Int `json:"block_number"` - ChainHeadHash string `json:"chain_head_hash"` - BlockPrevHash string `json:"block_prev_hash"` - LogEvent -} - -func (l *EthMinerNewBlock) EventName() string { - return "eth.miner.new_block" -} - -type EthChainReceivedNewBlock struct { - BlockHash string `json:"block_hash"` - BlockNumber *big.Int `json:"block_number"` - ChainHeadHash string `json:"chain_head_hash"` - BlockPrevHash string `json:"block_prev_hash"` - RemoteId string `json:"remote_id"` - LogEvent -} - -func (l *EthChainReceivedNewBlock) EventName() string { - return "eth.chain.received.new_block" -} - -type EthChainNewHead struct { - BlockHash string `json:"block_hash"` - BlockNumber *big.Int `json:"block_number"` - ChainHeadHash string `json:"chain_head_hash"` - BlockPrevHash string `json:"block_prev_hash"` - LogEvent -} - -func (l *EthChainNewHead) EventName() string { - return "eth.chain.new_head" -} - -type EthTxReceived struct { - TxHash string `json:"tx_hash"` - RemoteId string `json:"remote_id"` - LogEvent -} - -func (l *EthTxReceived) EventName() string { - return "eth.tx.received" -} - -// -// -// The types below are legacy and need to be converted to new format or deleted -// -// - -// type P2PConnecting struct { -// RemoteId string `json:"remote_id"` -// RemoteEndpoint string `json:"remote_endpoint"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PConnecting) EventName() string { -// return "p2p.connecting" -// } - -// type P2PHandshaked struct { -// RemoteCapabilities []string `json:"remote_capabilities"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PHandshaked) EventName() string { -// return "p2p.handshaked" -// } - -// type P2PDisconnecting struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PDisconnecting) EventName() string { -// return "p2p.disconnecting" -// } - -// type P2PDisconnectingBadHandshake struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l 
*P2PDisconnectingBadHandshake) EventName() string { -// return "p2p.disconnecting.bad_handshake" -// } - -// type P2PDisconnectingBadProtocol struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PDisconnectingBadProtocol) EventName() string { -// return "p2p.disconnecting.bad_protocol" -// } - -// type P2PDisconnectingReputation struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PDisconnectingReputation) EventName() string { -// return "p2p.disconnecting.reputation" -// } - -// type P2PDisconnectingDHT struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PDisconnectingDHT) EventName() string { -// return "p2p.disconnecting.dht" -// } - -// type P2PEthDisconnectingBadBlock struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PEthDisconnectingBadBlock) EventName() string { -// return "p2p.eth.disconnecting.bad_block" -// } - -// type P2PEthDisconnectingBadTx struct { -// Reason string `json:"reason"` -// RemoteId string `json:"remote_id"` -// NumConnections int `json:"num_connections"` -// LogEvent -// } - -// func (l *P2PEthDisconnectingBadTx) EventName() string { -// return "p2p.eth.disconnecting.bad_tx" -// } - -// type EthNewBlockBroadcasted struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockBroadcasted) EventName() string { -// return "eth.newblock.broadcasted" -// } - -// type EthNewBlockIsKnown struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockIsKnown) EventName() string { -// return "eth.newblock.is_known" -// } - -// type EthNewBlockIsNew struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockIsNew) EventName() string { -// return "eth.newblock.is_new" -// } - -// type EthNewBlockMissingParent struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockMissingParent) EventName() string { -// return "eth.newblock.missing_parent" -// } - -// type EthNewBlockIsInvalid struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockIsInvalid) EventName() string { -// return "eth.newblock.is_invalid" -// } - -// type EthNewBlockChainIsOlder struct { -// 
BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockChainIsOlder) EventName() string { -// return "eth.newblock.chain.is_older" -// } - -// type EthNewBlockChainIsCanonical struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockChainIsCanonical) EventName() string { -// return "eth.newblock.chain.is_cannonical" -// } - -// type EthNewBlockChainNotCanonical struct { -// BlockNumber int `json:"block_number"` -// HeadHash string `json:"head_hash"` -// BlockHash string `json:"block_hash"` -// BlockDifficulty int `json:"block_difficulty"` -// BlockPrevHash string `json:"block_prev_hash"` -// LogEvent -// } - -// func (l *EthNewBlockChainNotCanonical) EventName() string { -// return "eth.newblock.chain.not_cannonical" -// } - -// type EthTxCreated struct { -// TxHash string `json:"tx_hash"` -// TxSender string `json:"tx_sender"` -// TxAddress string `json:"tx_address"` -// TxHexRLP string `json:"tx_hexrlp"` -// TxNonce int `json:"tx_nonce"` -// LogEvent -// } - -// func (l *EthTxCreated) EventName() string { -// return "eth.tx.created" -// } - -// type EthTxBroadcasted struct { -// TxHash string `json:"tx_hash"` -// TxSender string `json:"tx_sender"` -// TxAddress string `json:"tx_address"` -// TxNonce int `json:"tx_nonce"` -// LogEvent -// } - -// func (l *EthTxBroadcasted) EventName() string { -// return "eth.tx.broadcasted" -// } - -// type EthTxValidated struct { -// TxHash string `json:"tx_hash"` -// TxSender string `json:"tx_sender"` -// TxAddress string `json:"tx_address"` -// TxNonce int `json:"tx_nonce"` -// LogEvent -// } - -// func (l *EthTxValidated) EventName() string { -// return "eth.tx.validated" -// } - -// type EthTxIsInvalid struct { -// TxHash string `json:"tx_hash"` -// TxSender string `json:"tx_sender"` -// TxAddress string `json:"tx_address"` -// Reason string `json:"reason"` -// TxNonce int `json:"tx_nonce"` -// LogEvent -// } - -// func (l *EthTxIsInvalid) EventName() string { -// return "eth.tx.is_invalid" -// } diff --git a/miner/worker.go b/miner/worker.go index 68ce44db09..56fd4ea663 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -39,8 +39,6 @@ import ( "gopkg.in/fatih/set.v0" ) -var jsonlogger = logger.NewJsonLogger() - const ( resultQueueSize = 10 miningLogAtDepth = 5 diff --git a/node/node_test.go b/node/node_test.go index 95fa53b067..408d4cfcbc 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -218,7 +218,7 @@ func TestServiceRestarts(t *testing.T) { } defer stack.Stop() - if running || started != 1 { + if !running || started != 1 { t.Fatalf("running/started mismatch: have %v/%d, want true/1", running, started) } // Restart the stack a few times and check successful service restarts diff --git a/p2p/server.go b/p2p/server.go index cf9672e2d2..298148d3ee 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -54,8 +54,6 @@ const ( var errServerStopped = errors.New("server stopped") -var srvjslog = logger.NewJsonLogger() - // Config holds Server options. type Config struct { // This field must be set to a valid secp256k1 private key. 
@@ -737,12 +735,6 @@ func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error { // the peer. func (srv *Server) runPeer(p *Peer) { glog.V(logger.Debug).Infof("Added %v\n", p) - srvjslog.LogJson(&logger.P2PConnected{ - RemoteId: p.ID().String(), - RemoteAddress: p.RemoteAddr().String(), - RemoteVersionString: p.Name(), - NumConnections: srv.PeerCount(), - }) if srv.newPeerHook != nil { srv.newPeerHook(p) @@ -753,10 +745,6 @@ func (srv *Server) runPeer(p *Peer) { srv.delpeer <- p glog.V(logger.Debug).Infof("Removed %v (%v)\n", p, discreason) - srvjslog.LogJson(&logger.P2PDisconnected{ - RemoteId: p.ID().String(), - NumConnections: srv.PeerCount(), - }) } // NodeInfo represents a short summary of the information known about the host. diff --git a/pow/dagger/dagger.go b/pow/dagger/dagger.go deleted file mode 100644 index f54ba71ca1..0000000000 --- a/pow/dagger/dagger.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package dagger - -import ( - "hash" - "math/big" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/sha3" - "github.com/ethereum/go-ethereum/logger" -) - -var powlogger = logger.NewLogger("POW") - -type Dagger struct { - hash *big.Int - xn *big.Int -} - -var Found bool - -func (dag *Dagger) Find(obj *big.Int, resChan chan int64) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - for i := 0; i < 1000; i++ { - rnd := r.Int63() - - res := dag.Eval(big.NewInt(rnd)) - powlogger.Infof("rnd %v\nres %v\nobj %v\n", rnd, res, obj) - if res.Cmp(obj) < 0 { - // Post back result on the channel - resChan <- rnd - // Notify other threads we've found a valid nonce - Found = true - } - - // Break out if found - if Found { - break - } - } - - resChan <- 0 -} - -func (dag *Dagger) Search(hash, diff *big.Int) (uint64, []byte) { - // TODO fix multi threading. Somehow it results in the wrong nonce - amountOfRoutines := 1 - - dag.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - Found = false - resChan := make(chan int64, 3) - var res int64 - - for k := 0; k < amountOfRoutines; k++ { - go dag.Find(obj, resChan) - - // Wait for each go routine to finish - } - for k := 0; k < amountOfRoutines; k++ { - // Get the result from the channel. 
0 = quit - if r := <-resChan; r != 0 { - res = r - } - } - - return uint64(res), nil -} - -func (dag *Dagger) Verify(hash, diff, nonce *big.Int) bool { - dag.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - return dag.Eval(nonce).Cmp(obj) < 0 -} - -func DaggerVerify(hash, diff, nonce *big.Int) bool { - dagger := &Dagger{} - dagger.hash = hash - - obj := common.BigPow(2, 256) - obj = obj.Div(obj, diff) - - return dagger.Eval(nonce).Cmp(obj) < 0 -} - -func (dag *Dagger) Node(L uint64, i uint64) *big.Int { - if L == i { - return dag.hash - } - - var m *big.Int - if L == 9 { - m = big.NewInt(16) - } else { - m = big.NewInt(3) - } - - sha := sha3.NewKeccak256() - sha.Reset() - d := sha3.NewKeccak256() - b := new(big.Int) - ret := new(big.Int) - - for k := 0; k < int(m.Uint64()); k++ { - d.Reset() - d.Write(dag.hash.Bytes()) - d.Write(dag.xn.Bytes()) - d.Write(big.NewInt(int64(L)).Bytes()) - d.Write(big.NewInt(int64(i)).Bytes()) - d.Write(big.NewInt(int64(k)).Bytes()) - - b.SetBytes(Sum(d)) - pk := b.Uint64() & ((1 << ((L - 1) * 3)) - 1) - sha.Write(dag.Node(L-1, pk).Bytes()) - } - - ret.SetBytes(Sum(sha)) - - return ret -} - -func Sum(sha hash.Hash) []byte { - //in := make([]byte, 32) - return sha.Sum(nil) -} - -func (dag *Dagger) Eval(N *big.Int) *big.Int { - pow := common.BigPow(2, 26) - dag.xn = pow.Div(N, pow) - - sha := sha3.NewKeccak256() - sha.Reset() - ret := new(big.Int) - - for k := 0; k < 4; k++ { - d := sha3.NewKeccak256() - b := new(big.Int) - - d.Reset() - d.Write(dag.hash.Bytes()) - d.Write(dag.xn.Bytes()) - d.Write(N.Bytes()) - d.Write(big.NewInt(int64(k)).Bytes()) - - b.SetBytes(Sum(d)) - pk := (b.Uint64() & 0x1ffffff) - - sha.Write(dag.Node(9, pk).Bytes()) - } - - return ret.SetBytes(Sum(sha)) -} diff --git a/pow/dagger/dagger_test.go b/pow/dagger/dagger_test.go deleted file mode 100644 index 39b74df306..0000000000 --- a/pow/dagger/dagger_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package dagger - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" -) - -func BenchmarkDaggerSearch(b *testing.B) { - hash := big.NewInt(0) - diff := common.BigPow(2, 36) - o := big.NewInt(0) // nonce doesn't matter. We're only testing against speed, not validity - - // Reset timer so the big generation isn't included in the benchmark - b.ResetTimer() - // Validate - DaggerVerify(hash, diff, o) -} diff --git a/pow/ezp/pow.go b/pow/ezp/pow.go deleted file mode 100644 index 0f7ee35709..0000000000 --- a/pow/ezp/pow.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ezp - -import ( - "encoding/binary" - "math/big" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/sha3" - "github.com/ethereum/go-ethereum/logger" - "github.com/ethereum/go-ethereum/pow" -) - -var powlogger = logger.NewLogger("POW") - -type EasyPow struct { - hash *big.Int - HashRate int64 - turbo bool -} - -func New() *EasyPow { - return &EasyPow{turbo: false} -} - -func (pow *EasyPow) GetHashrate() int64 { - return pow.HashRate -} - -func (pow *EasyPow) Turbo(on bool) { - pow.turbo = on -} - -func (pow *EasyPow) Search(block pow.Block, stop <-chan struct{}, index int) (uint64, []byte) { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - hash := block.HashNoNonce() - diff := block.Difficulty() - //i := int64(0) - // TODO fix offset - i := rand.Int63() - starti := i - start := time.Now().UnixNano() - - defer func() { pow.HashRate = 0 }() - - // Make sure stop is empty -empty: - for { - select { - case <-stop: - default: - break empty - } - } - - for { - select { - case <-stop: - return 0, nil - default: - i++ - - elapsed := time.Now().UnixNano() - start - hashes := ((float64(1e9) / float64(elapsed)) * float64(i-starti)) / 1000 - pow.HashRate = int64(hashes) - - sha := uint64(r.Int63()) - if verify(hash, diff, sha) { - return sha, nil - } - } - - if !pow.turbo { - time.Sleep(20 * time.Microsecond) - } - } -} - -func (pow *EasyPow) Verify(block pow.Block) bool { - return Verify(block) -} - -func verify(hash common.Hash, diff *big.Int, nonce uint64) bool { - sha := sha3.NewKeccak256() - n := make([]byte, 8) - binary.PutUvarint(n, nonce) - sha.Write(n) - sha.Write(hash[:]) - verification := new(big.Int).Div(common.BigPow(2, 256), diff) - res := common.BigD(sha.Sum(nil)) - return res.Cmp(verification) <= 0 -} - -func Verify(block pow.Block) bool { - return verify(block.HashNoNonce(), block.Difficulty(), block.Nonce()) -} diff --git a/swarm/network/protocol.go b/swarm/network/protocol.go index 4fffaac6d5..763fb0b8e2 100644 --- a/swarm/network/protocol.go +++ b/swarm/network/protocol.go @@ -538,13 +538,6 @@ func (self *bzz) protoError(code int, format string, params ...interface{}) (err return } -func (self *bzz) protoErrorDisconnect(err *errs.Error) { - err.Log(glog.V(logger.Info)) - if err.Fatal() { - self.peer.Disconnect(p2p.DiscSubprotocolError) - } -} - func (self *bzz) send(msg uint64, data interface{}) error { if self.hive.blockWrite { return fmt.Errorf("network write blocked")
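For context on the deletions in this last patch: package logger, pow/dagger and pow/ezp are removed as dead code (per the patch subject), and the leveled logging that remains is the glog verbosity guard already visible in the retained hunks (runPeer, protoError). A hedged sketch of that surviving idiom, assuming the SetToStderr/SetV setters present in go-ethereum's glog fork of this era:

package main

import (
	"github.com/ethereum/go-ethereum/logger"
	"github.com/ethereum/go-ethereum/logger/glog"
)

func main() {
	// Assumed setters from the glog fork; normally wired up by the CLI flags.
	glog.SetToStderr(true)
	glog.SetV(int(logger.Debug))

	// glog.V(level) returns a guard that only logs when the configured
	// verbosity is at least level; logger.Info and logger.Debug are the
	// verbosity constants the retained code keeps using after the old
	// logger package is deleted.
	glog.V(logger.Info).Infoln("peer added")
	glog.V(logger.Debug).Infof("removed %v (%v)", "peer", "requested")
}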